chore: Update manifests after change

commit dad1b4623c
parent 0829fe6c55
Date: 2025-12-04 04:14:55 +00:00
105 changed files with 0 additions and 393558 deletions

@@ -1,227 +0,0 @@
---
# Source: actual/charts/actual/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: actual-data
labels:
app.kubernetes.io/instance: actual
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: actual
helm.sh/chart: actual-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: actual
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "2Gi"
storageClassName: "ceph-block"
---
# Source: actual/charts/actual/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: actual
labels:
app.kubernetes.io/instance: actual
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: actual
app.kubernetes.io/service: actual
helm.sh/chart: actual-4.4.0
namespace: actual
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5006
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: actual
app.kubernetes.io/name: actual
---
# Source: actual/charts/actual/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: actual
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: actual
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: actual
helm.sh/chart: actual-4.4.0
namespace: actual
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: actual
app.kubernetes.io/instance: actual
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: actual
app.kubernetes.io/name: actual
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/actualbudget/actual:25.12.0
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /usr/bin/env
- bash
- -c
- node src/scripts/health-check.js
failureThreshold: 5
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: actual-data
---
# Source: actual/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: actual-data-backup-secret
namespace: actual
labels:
app.kubernetes.io/name: actual-data-backup-secret
app.kubernetes.io/instance: actual
app.kubernetes.io/part-of: actual
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/actual/actual-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: actual/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-actual
namespace: actual
labels:
app.kubernetes.io/name: http-route-actual
app.kubernetes.io/instance: actual
app.kubernetes.io/part-of: actual
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- actual.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: actual
port: 80
weight: 100
---
# Source: actual/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: actual-data-backup-source
namespace: actual
labels:
app.kubernetes.io/name: actual-data-backup-source
app.kubernetes.io/instance: actual
app.kubernetes.io/part-of: actual
spec:
sourcePVC: actual-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: actual-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,471 +0,0 @@
---
# Source: audiobookshelf/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: audiobookshelf-nfs-storage
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-config
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: audiobookshelf
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "2Gi"
storageClassName: "ceph-block"
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-metadata
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: audiobookshelf
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: audiobookshelf/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage-backup
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage-backup
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
# Source: audiobookshelf/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
volumeName: audiobookshelf-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
labels:
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/service: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
namespace: audiobookshelf
spec:
type: ClusterIP
ports:
- port: 8000
targetPort: 8000
protocol: TCP
name: apprise
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/name: audiobookshelf
---
# Source: audiobookshelf/charts/audiobookshelf/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: audiobookshelf
helm.sh/chart: audiobookshelf-4.4.0
namespace: audiobookshelf
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/name: audiobookshelf
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: APPRISE_STORAGE_MODE
value: memory
- name: APPRISE_STATEFUL_MODE
value: disabled
- name: APPRISE_WORKER_COUNT
value: "1"
- name: APPRISE_STATELESS_URLS
valueFrom:
secretKeyRef:
key: ntfy-url
name: audiobookshelf-apprise-config
image: caronc/apprise:1.2.6
imagePullPolicy: IfNotPresent
name: apprise-api
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: TZ
value: US/Central
image: ghcr.io/advplyr/audiobookshelf:2.31.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /mnt/store/
name: audiobooks
- mountPath: /metadata/backups
name: backup
- mountPath: /config
name: config
- mountPath: /metadata
name: metadata
volumes:
- name: audiobooks
persistentVolumeClaim:
claimName: audiobookshelf-nfs-storage
- name: backup
persistentVolumeClaim:
claimName: audiobookshelf-nfs-storage-backup
- name: config
persistentVolumeClaim:
claimName: audiobookshelf-config
- name: metadata
persistentVolumeClaim:
claimName: audiobookshelf-metadata
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-apprise-config
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-apprise-config
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/audiobookshelf/apprise
metadataPolicy: None
property: ntfy-url
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-config-backup-secret
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-secret
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: audiobookshelf/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-metadata-backup-secret
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-secret
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/audiobookshelf/audiobookshelf-metadata"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: audiobookshelf/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-audiobookshelf
namespace: audiobookshelf
labels:
app.kubernetes.io/name: http-route-audiobookshelf
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100
---
# Source: audiobookshelf/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-config-backup-source
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-source
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
sourcePVC: audiobookshelf-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: audiobookshelf/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-metadata-backup-source
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-source
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
sourcePVC: audiobookshelf-metadata
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-metadata-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: audiobookshelf/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: audiobookshelf-apprise
namespace: audiobookshelf
labels:
app.kubernetes.io/name: audiobookshelf-apprise
app.kubernetes.io/instance: audiobookshelf
app.kubernetes.io/part-of: audiobookshelf
spec:
endpoints:
- port: apprise
interval: 30s
scrapeTimeout: 15s
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf

File diff suppressed because it is too large

@@ -1,289 +0,0 @@
---
# Source: backrest/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: backrest-nfs-storage
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-storage
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: backrest/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: backrest-nfs-share
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-share
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Share
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: backrest/charts/backrest/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: backrest-config
labels:
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
helm.sh/chart: backrest-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: backrest
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "1Gi"
storageClassName: "ceph-block"
---
# Source: backrest/charts/backrest/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: backrest-data
labels:
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
helm.sh/chart: backrest-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: backrest
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: backrest/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: backrest-nfs-storage
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-storage
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
volumeName: backrest-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: backrest/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: backrest-nfs-share
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-share
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
volumeName: backrest-nfs-share
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: backrest/charts/backrest/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: backrest
labels:
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
app.kubernetes.io/service: backrest
helm.sh/chart: backrest-4.4.0
namespace: backrest
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9898
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: backrest
app.kubernetes.io/name: backrest
---
# Source: backrest/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: backrest
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
---
# Source: backrest/charts/backrest/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: backrest
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
helm.sh/chart: backrest-4.4.0
namespace: backrest
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: backrest
app.kubernetes.io/instance: backrest
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: backrest
app.kubernetes.io/name: backrest
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: America/Chicago
- name: BACKREST_DATA
value: /data
- name: BACKREST_CONFIG
value: /config/config.json
- name: XDG_CACHE_HOME
value: /cache
- name: TMPDIR
value: /tmp
image: garethgeorge/backrest:v1.10.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /cache
name: cache
- mountPath: /config
name: config
- mountPath: /data
name: data
- mountPath: /mnt/share
name: share
readOnly: true
- mountPath: /mnt/storage
name: storage
readOnly: true
- mountPath: /tmp
name: tmp
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: backrest-config
- name: data
persistentVolumeClaim:
claimName: backrest-data
- name: share
persistentVolumeClaim:
claimName: backrest-nfs-share
- name: storage
persistentVolumeClaim:
claimName: backrest-nfs-storage
- emptyDir: {}
name: tmp
---
# Source: backrest/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-backrest
namespace: backrest
labels:
app.kubernetes.io/name: http-route-backrest
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- backrest.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: backrest
port: 80
weight: 100

@@ -1,278 +0,0 @@
---
# Source: bazarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: bazarr-nfs-storage
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-nfs-storage
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: bazarr/charts/bazarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: bazarr-config
labels:
app.kubernetes.io/instance: bazarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: bazarr
helm.sh/chart: bazarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: bazarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: bazarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bazarr-nfs-storage
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-nfs-storage
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
volumeName: bazarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: bazarr/charts/bazarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: bazarr
labels:
app.kubernetes.io/instance: bazarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: bazarr
app.kubernetes.io/service: bazarr
helm.sh/chart: bazarr-4.4.0
namespace: bazarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 6767
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: bazarr
app.kubernetes.io/name: bazarr
---
# Source: bazarr/charts/bazarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: bazarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: bazarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: bazarr
helm.sh/chart: bazarr-4.4.0
namespace: bazarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: bazarr
app.kubernetes.io/instance: bazarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: bazarr
app.kubernetes.io/name: bazarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/bazarr:1.5.3@sha256:ec11e988e8e13411c994a4d9f43ed9b97409aa92c1da54d9f23926c3da7c2032
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
volumes:
- name: config
persistentVolumeClaim:
claimName: bazarr-config
- name: media
persistentVolumeClaim:
claimName: bazarr-nfs-storage
---
# Source: bazarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: bazarr-config-backup-secret
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-config-backup-secret
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/bazarr/bazarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: bazarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-bazarr
namespace: bazarr
labels:
app.kubernetes.io/name: http-route-bazarr
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- bazarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: bazarr
port: 80
weight: 100
---
# Source: bazarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: bazarr-config-backup-source
namespace: bazarr
labels:
app.kubernetes.io/name: bazarr-config-backup-source
app.kubernetes.io/instance: bazarr
app.kubernetes.io/part-of: bazarr
spec:
sourcePVC: bazarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: bazarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

@@ -1,448 +0,0 @@
---
# Source: blocky/charts/blocky/templates/common.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: blocky
labels:
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
helm.sh/chart: blocky-4.4.0
namespace: blocky
data:
config.yml: |
upstreams:
init:
strategy: fast
groups:
default:
- tcp-tls:1.1.1.1:853
- tcp-tls:1.0.0.1:853
strategy: parallel_best
timeout: 2s
connectIPVersion: v4
customDNS:
filterUnmappedTypes: false
zone: |
$ORIGIN alexlebens.net.
$TTL 86400
;; Name Server
IN NS patryk.ns.cloudflare.com.
IN NS veda.ns.cloudflare.com.
IN NS dns1.
IN NS dns2.
IN NS dns3.
dns1 IN A 10.232.1.22
dns2 IN A 10.232.1.51
dns3 IN A 10.232.1.52
;; Computer Names
nw01un IN A 192.168.1.1 ; Unifi Gateway
ps08rp IN A 10.232.1.51 ; DNS
ps09rp IN A 10.232.1.52 ; DNS
ps02sn IN A 10.232.1.61 ; Synology Web
ps02sn-bond IN A 10.232.1.64 ; Synology Bond for Storage
pd05wd IN A 10.230.0.115 ; Desktop
pl02mc IN A 10.230.0.105 ; Laptop
dv01hr IN A 10.232.1.72 ; HD Homerun
dv02kv IN A 10.232.1.71 ; Pi KVM
it01ag IN A 10.232.1.83 ; Airgradient
it02ph IN A 10.232.1.85 ; Phillips Hue
it03tb IN A 10.232.1.81 ; TubesZB ZigBee
it04tb IN A 10.232.1.82 ; TubesZB Z-Wave
it05sp IN A 10.230.0.100 ; Shelly Plug
;; Common Names
synology IN CNAME ps02sn
synologybond IN CNAME ps02sn-bond
unifi IN CNAME nw01un
airgradient IN CNAME it01ag
hdhr IN CNAME dv01hr
pikvm IN CNAME dv02kv
;; Service Names
cl01tl IN A 10.232.1.11
cl01tl IN A 10.232.1.12
cl01tl IN A 10.232.1.13
cl01tl-api IN A 10.232.1.11
cl01tl-api IN A 10.232.1.12
cl01tl-api IN A 10.232.1.13
cl01tl-endpoint IN A 10.232.1.21
cl01tl-endpoint IN A 10.232.1.22
cl01tl-endpoint IN A 10.232.1.23
cl01tl-gateway IN A 10.232.1.200
traefik-cl01tl IN A 10.232.1.21
blocky IN A 10.232.1.22
plex-lb IN A 10.232.1.23
;; Application Names
actual IN CNAME traefik-cl01tl
alertmanager IN CNAME traefik-cl01tl
argo-workflows IN CNAME traefik-cl01tl
argocd IN CNAME traefik-cl01tl
audiobookshelf IN CNAME traefik-cl01tl
authentik IN CNAME traefik-cl01tl
backrest IN CNAME traefik-cl01tl
bazarr IN CNAME traefik-cl01tl
booklore IN CNAME traefik-cl01tl
ceph IN CNAME traefik-cl01tl
code-server IN CNAME traefik-cl01tl
ephemera IN CNAME traefik-cl01tl
garage-s3 IN CNAME traefik-cl01tl
garage-webui IN CNAME traefik-cl01tl
gatus IN CNAME traefik-cl01tl
gitea IN CNAME traefik-cl01tl
grafana IN CNAME traefik-cl01tl
harbor IN CNAME traefik-cl01tl
headlamp IN CNAME traefik-cl01tl
home IN CNAME traefik-cl01tl
home-assistant IN CNAME traefik-cl01tl
home-assistant-code-server IN CNAME traefik-cl01tl
hubble IN CNAME cl01tl-gateway
huntarr IN CNAME traefik-cl01tl
immich IN CNAME traefik-cl01tl
jellyfin IN CNAME traefik-cl01tl
jellystat IN CNAME traefik-cl01tl
kiwix IN CNAME traefik-cl01tl
komodo IN CNAME traefik-cl01tl
kronic IN CNAME traefik-cl01tl
lidarr IN CNAME traefik-cl01tl
lidatube IN CNAME traefik-cl01tl
listenarr IN CNAME traefik-cl01tl
mail IN CNAME traefik-cl01tl
n8n IN CNAME traefik-cl01tl
ntfy IN CNAME traefik-cl01tl
objects IN CNAME traefik-cl01tl
ollama IN CNAME traefik-cl01tl
omni-tools IN CNAME traefik-cl01tl
overseerr IN CNAME traefik-cl01tl
pgadmin IN CNAME traefik-cl01tl
photoview IN CNAME traefik-cl01tl
plex IN CNAME traefik-cl01tl
postiz IN CNAME traefik-cl01tl
prometheus IN CNAME traefik-cl01tl
prowlarr IN CNAME traefik-cl01tl
qbittorrent IN CNAME traefik-cl01tl
qui IN CNAME traefik-cl01tl
radarr IN CNAME traefik-cl01tl
radarr-4k IN CNAME traefik-cl01tl
radarr-anime IN CNAME traefik-cl01tl
radarr-standup IN CNAME traefik-cl01tl
searxng IN CNAME traefik-cl01tl
slskd IN CNAME traefik-cl01tl
sonarr IN CNAME traefik-cl01tl
sonarr-4k IN CNAME traefik-cl01tl
sonarr-anime IN CNAME traefik-cl01tl
stalwart IN CNAME traefik-cl01tl
tautulli IN CNAME traefik-cl01tl
tdarr IN CNAME traefik-cl01tl
tubearchivist IN CNAME traefik-cl01tl
vault IN CNAME traefik-cl01tl
whodb IN CNAME traefik-cl01tl
yamtrack IN CNAME traefik-cl01tl
blocking:
denylists:
sus:
- https://v.firebog.net/hosts/static/w3kbl.txt
ads:
- https://v.firebog.net/hosts/AdguardDNS.txt
- https://v.firebog.net/hosts/Admiral.txt
- https://v.firebog.net/hosts/Easylist.txt
- https://adaway.org/hosts.txt
priv:
- https://v.firebog.net/hosts/Easyprivacy.txt
- https://v.firebog.net/hosts/Prigent-Ads.txt
mal:
- https://v.firebog.net/hosts/Prigent-Crypto.txt
- https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt
pro:
- https://raw.githubusercontent.com/hagezi/dns-blocklists/main/wildcard/pro.plus.txt
allowlists:
sus:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
ads:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
priv:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
mal:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
pro:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
clientGroupsBlock:
default:
- sus
- ads
- priv
- mal
- pro
blockType: zeroIp
blockTTL: 1m
loading:
refreshPeriod: 24h
downloads:
timeout: 60s
attempts: 5
cooldown: 10s
concurrency: 16
strategy: fast
maxErrorsPerSource: 5
caching:
minTime: 5m
maxTime: 30m
maxItemsCount: 0
prefetching: true
prefetchExpires: 2h
prefetchThreshold: 5
prefetchMaxItemsCount: 0
cacheTimeNegative: 30m
redis:
address: redis-replication-blocky-master.blocky:6379
required: true
prometheus:
enable: true
path: /metrics
queryLog:
type: console
logRetentionDays: 7
creationAttempts: 1
creationCooldown: 2s
flushInterval: 30s
minTlsServeVersion: 1.3
ports:
dns: 53
http: 4000
log:
level: info
format: text
timestamp: true
privacy: false
---
# Source: blocky/charts/blocky/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: blocky-dns-external
labels:
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
app.kubernetes.io/service: blocky-dns-external
helm.sh/chart: blocky-4.4.0
annotations:
tailscale.com/expose: "true"
namespace: blocky
spec:
type: LoadBalancer
ports:
- port: 53
targetPort: 53
protocol: TCP
name: tcp
- port: 53
targetPort: 53
protocol: UDP
name: udp
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/name: blocky
---
# Source: blocky/charts/blocky/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: blocky-metrics
labels:
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
app.kubernetes.io/service: blocky-metrics
helm.sh/chart: blocky-4.4.0
namespace: blocky
spec:
type: ClusterIP
ports:
- port: 4000
targetPort: 4000
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/name: blocky
---
# Source: blocky/charts/blocky/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: blocky
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
helm.sh/chart: blocky-4.4.0
namespace: blocky
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
template:
metadata:
annotations:
checksum/configMaps: 8a197f81daed9048c4565ecafc0c7ca534383a898e709a13c3441bc00bd12652
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/name: blocky
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/0xerr0r/blocky:v0.28.2@sha256:5f84a54e4ee950c4ab21db905b7497476ece2f4e1a376d23ab8c4855cabddcba
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config.yml
mountPropagation: None
name: config
readOnly: true
subPath: config.yml
volumes:
- configMap:
name: blocky
name: config
---
# Source: blocky/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-blocky
namespace: blocky
labels:
app.kubernetes.io/name: redis-replication-blocky
app.kubernetes.io/instance: blocky
app.kubernetes.io/part-of: blocky
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: blocky/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: blocky
namespace: blocky
labels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
app.kubernetes.io/part-of: blocky
spec:
selector:
matchLabels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
---
# Source: blocky/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-blocky
namespace: blocky
labels:
app.kubernetes.io/name: redis-replication-blocky
app.kubernetes.io/instance: blocky
app.kubernetes.io/part-of: blocky
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

@@ -1,946 +0,0 @@
---
# Source: booklore/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: booklore
annotations:
volsync.backube/privileged-movers: "true"
labels:
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
---
# Source: booklore/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: booklore-books-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: booklore/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: booklore-books-import-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-import-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books Import
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: booklore/charts/booklore/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: booklore-config
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: booklore
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: booklore/charts/booklore/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: booklore-data
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: booklore
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: booklore/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: booklore-books-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
volumeName: booklore-books-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: booklore/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: booklore-books-import-nfs-storage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-books-import-nfs-storage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
volumeName: booklore-books-import-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: booklore/charts/booklore/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: booklore
labels:
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
app.kubernetes.io/service: booklore
helm.sh/chart: booklore-4.4.0
namespace: booklore
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 6060
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/name: booklore
---
# Source: booklore/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: booklore
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
---
# Source: booklore/charts/booklore/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: booklore
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: booklore
helm.sh/chart: booklore-4.4.0
namespace: booklore
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: booklore
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: booklore
app.kubernetes.io/name: booklore
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: America/Chicago
- name: DATABASE_URL
value: jdbc:mariadb://booklore-mariadb-cluster-primary.booklore:3306/booklore
- name: DATABASE_USERNAME
value: booklore
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: booklore-database-secret
- name: BOOKLORE_PORT
value: "6060"
- name: SWAGGER_ENABLED
value: "false"
image: ghcr.io/booklore-app/booklore:v1.13.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /bookdrop
name: books-import
- mountPath: /app/data
name: config
- mountPath: /data
name: data
- mountPath: /bookdrop/ingest
name: ingest
volumes:
- emptyDir: {}
name: books-import
- name: config
persistentVolumeClaim:
claimName: booklore-config
- name: data
persistentVolumeClaim:
claimName: booklore-data
- name: ingest
persistentVolumeClaim:
claimName: booklore-books-import-nfs-storage
---
# Source: booklore/charts/mariadb-cluster/templates/database.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
characterSet: utf8
cleanupPolicy: Delete
collate: utf8_general_ci
name: booklore
requeueInterval: 10h
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-database-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-database-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/booklore/database
metadataPolicy: None
property: password
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-replication-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: psk.txt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/booklore/replication
metadataPolicy: None
property: psk.txt
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-config-backup-secret
namespace: booklore
labels:
app.kubernetes.io/name: booklore-config-backup-secret
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-local
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-local
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-remote
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-remote
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-secret-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-mariadb-cluster-backup-secret-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: access
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/mariadb-backups
metadataPolicy: None
property: access
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/mariadb-backups
metadataPolicy: None
property: secret
---
# Source: booklore/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-mariadb-cluster-backup-secret-garage
namespace: booklore
labels:
app.kubernetes.io/name: booklore-mariadb-cluster-backup-secret-garage
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: access
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/mariadb-backups
metadataPolicy: None
property: access
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/mariadb-backups
metadataPolicy: None
property: secret
---
# Source: booklore/charts/mariadb-cluster/templates/grant.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: Grant
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
cleanupPolicy: Delete
database: booklore
grantOption: true
host: '%'
privileges:
- ALL PRIVILEGES
requeueInterval: 10h
retryInterval: 30s
table: '*'
username: booklore
---
# Source: booklore/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-booklore
namespace: booklore
labels:
app.kubernetes.io/name: http-route-booklore
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100
---
# Source: booklore/charts/mariadb-cluster/templates/mariadb.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: MariaDB
metadata:
name: booklore-mariadb-cluster
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
galera:
enabled: true
replicas: 3
rootPasswordSecretKeyRef:
generate: false
key: password
name: booklore-database-secret
storage:
size: 5Gi
---
# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: PhysicalBackup
metadata:
name: booklore-mariadb-cluster-backup-external
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
compression: gzip
maxRetention: 720h
schedule:
cron: 0 0 * * 0
immediate: true
suspend: false
storage:
s3:
accessKeyIdSecretKeyRef:
key: access
name: booklore-mariadb-cluster-backup-secret-external
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
endpoint: nyc3.digitaloceanspaces.com
prefix: cl01tl/booklore
region: us-east-1
secretAccessKeySecretKeyRef:
key: secret
name: booklore-mariadb-cluster-backup-secret-external
tls:
enabled: true
---
# Source: booklore/charts/mariadb-cluster/templates/physicalbackup.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: PhysicalBackup
metadata:
name: booklore-mariadb-cluster-backup-garage
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
compression: gzip
maxRetention: 360h
schedule:
cron: 0 0 * * *
immediate: true
suspend: false
storage:
s3:
accessKeyIdSecretKeyRef:
key: access
name: booklore-mariadb-cluster-backup-secret-garage
bucket: mariadb-backups
endpoint: garage-main.garage:3900
prefix: cl01tl/booklore
region: us-east-1
secretAccessKeySecretKeyRef:
key: secret
name: booklore-mariadb-cluster-backup-secret-garage
---
# Source: booklore/templates/replication-destination.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
name: booklore-data-replication-destination
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-destination
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
rsyncTLS:
copyMethod: Direct
accessModes: ["ReadWriteMany"]
destinationPVC: booklore-books-nfs-storage
keySecret: booklore-data-replication-secret
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-replication-source
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-replication-source
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: "0 0 * * *"
rsyncTLS:
keySecret: booklore-data-replication-secret
address: volsync-rsync-tls-dst-booklore-data-replication-destination
copyMethod: Snapshot
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-config-backup-source
namespace: booklore
labels:
app.kubernetes.io/name: booklore-config-backup-source
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-local
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-local
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 2 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-local
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-remote
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-remote
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 3 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-remote
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-external
namespace: booklore
labels:
app.kubernetes.io/name: booklore-data-backup-source-external
app.kubernetes.io/instance: booklore
app.kubernetes.io/part-of: booklore
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-external
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
# Source: booklore/charts/mariadb-cluster/templates/user.yaml
apiVersion: k8s.mariadb.com/v1alpha1
kind: User
metadata:
name: booklore-mariadb-cluster-booklore
namespace: booklore
labels:
helm.sh/chart: mariadb-cluster-25.10.2
app.kubernetes.io/name: mariadb-cluster
app.kubernetes.io/instance: booklore
app.kubernetes.io/version: "0.0.0"
app.kubernetes.io/managed-by: Helm
spec:
mariaDbRef:
name: booklore-mariadb-cluster
namespace: booklore
cleanupPolicy: Delete
host: '%'
name: booklore
passwordSecretKeyRef:
key: password
name: booklore-database-secret
requeueInterval: 10h
retryInterval: 30s

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

@@ -1,251 +0,0 @@
---
# Source: code-server/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: code-server-nfs-storage
namespace: code-server
labels:
app.kubernetes.io/name: code-server-nfs-storage
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
# Source: code-server/charts/code-server/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: code-server
labels:
app.kubernetes.io/instance: code-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: code-server
app.kubernetes.io/service: code-server
helm.sh/chart: code-server-4.4.0
namespace: code-server
spec:
type: ClusterIP
ports:
- port: 8443
targetPort: 8443
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/name: code-server
---
# Source: code-server/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: code-server-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.1
namespace: code-server
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: code-server
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: code-server-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: code-server/charts/code-server/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: code-server
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: code-server
helm.sh/chart: code-server-4.4.0
namespace: code-server
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: code-server
app.kubernetes.io/instance: code-server
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: code-server
app.kubernetes.io/name: code-server
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: DEFAULT_WORKSPACE
value: /config
envFrom:
- secretRef:
name: codeserver-password-secret
image: ghcr.io/linuxserver/code-server:4.106.3@sha256:aab9520fe923b2d93dccc2c806f3dc60649c2f4a2847fcd40c942227d0f1ae8f
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: code-server-nfs-storage
---
# Source: code-server/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: codeserver-password-secret
namespace: code-server
labels:
app.kubernetes.io/name: codeserver-password-secret
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/code-server/auth
metadataPolicy: None
property: PASSWORD
- secretKey: SUDO_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/code-server/auth
metadataPolicy: None
property: SUDO_PASSWORD
---
# Source: code-server/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: code-server-cloudflared-secret
namespace: code-server
labels:
app.kubernetes.io/name: code-server-cloudflared-secret
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/codeserver
metadataPolicy: None
property: token
---
# Source: code-server/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-code-server
namespace: code-server
labels:
app.kubernetes.io/name: http-route-code-server
app.kubernetes.io/instance: code-server
app.kubernetes.io/part-of: code-server
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- code-server.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: code-server
port: 8443
weight: 100


@@ -1,300 +0,0 @@
---
# Source: coredns/charts/coredns/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
---
# Source: coredns/charts/coredns/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
data:
Corefile: |-
dns://.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
dns://alexlebens.net:53 {
errors
cache 30
forward . 10.111.232.172
}
dns://ts.net:53 {
errors
cache 30
forward . 10.97.20.219
}
---
# Source: coredns/charts/coredns/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: coredns
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
# Source: coredns/charts/coredns/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: coredns
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
# Source: coredns/charts/coredns/templates/service-metrics.yaml
apiVersion: v1
kind: Service
metadata:
name: coredns-metrics
namespace: kube-system
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
app.kubernetes.io/component: metrics
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
spec:
selector:
app.kubernetes.io/instance: "coredns"
k8s-app: coredns
app.kubernetes.io/name: coredns
ports:
- name: metrics
port: 9153
targetPort: 9153
---
# Source: coredns/charts/coredns/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
spec:
selector:
app.kubernetes.io/instance: "coredns"
k8s-app: coredns
app.kubernetes.io/name: coredns
clusterIP: 10.96.0.10
clusterIPs:
- 10.96.0.10
ports:
- {"name":"udp-53","port":53,"protocol":"UDP","targetPort":53}
- {"name":"tcp-53","port":53,"protocol":"TCP","targetPort":53}
type: ClusterIP
---
# Source: coredns/charts/coredns/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
app.kubernetes.io/version: "v1.13.1"
spec:
replicas: 3
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
selector:
matchLabels:
app.kubernetes.io/instance: "coredns"
k8s-app: coredns
app.kubernetes.io/name: coredns
template:
metadata:
labels:
k8s-app: coredns
app.kubernetes.io/name: coredns
app.kubernetes.io/instance: "coredns"
annotations:
checksum/config: 6f07144a3d5dc8ad880e010546e8deee6bb3a150eb089529d925c21f2a78a7d0
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
terminationGracePeriodSeconds: 30
serviceAccountName: coredns
priorityClassName: "system-cluster-critical"
dnsPolicy: Default
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
nodeSelector:
kubernetes.io/os: linux
containers:
- name: "coredns"
image: "registry.k8s.io/coredns/coredns:v1.13.1"
imagePullPolicy: IfNotPresent
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 128Mi
ports:
- {"containerPort":53,"name":"udp-53","protocol":"UDP"}
- {"containerPort":53,"name":"tcp-53","protocol":"TCP"}
- {"containerPort":9153,"name":"tcp-9153","protocol":"TCP"}
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
# Source: coredns/charts/coredns/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: coredns
namespace: kube-system
labels:
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/instance: "coredns"
helm.sh/chart: "coredns-1.45.0"
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
spec:
selector:
matchLabels:
app.kubernetes.io/instance: "coredns"
k8s-app: coredns
app.kubernetes.io/name: coredns
app.kubernetes.io/component: metrics
endpoints:
- port: metrics

View File

@@ -1,720 +0,0 @@
---
# Source: democratic-csi-synology-iscsi/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: democratic-csi-synology-iscsi
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/part-of: democratic-csi-synology-iscsi
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/controller-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: democratic-csi-synology-iscsi-controller-sa
namespace: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/node-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: democratic-csi-synology-iscsi-node-sa
namespace: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: democratic-csi-synology-iscsi
namespace: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
data:
extra-ca-certs: ""
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/storage-classes.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: synology-iscsi-delete
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
provisioner: org.democratic-csi.iscsi-synology
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
parameters:
fsType: "ext4"
# this loop is deeply connected to the loop for Secret creation below
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/storage-classes.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: synology-iscsi-retain
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
provisioner: org.democratic-csi.iscsi-synology
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: Immediate
parameters:
fsType: "ext4"
# this loop is deeply connected to the loop for Secret creation below
# this loop is deeply connected to the loop for secret parameter settings above
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/controller-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: democratic-csi-synology-iscsi-controller-cr
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
rules:
# Allow listing and creating CRDs
- apiGroups: ['apiextensions.k8s.io']
resources: ['customresourcedefinitions']
verbs: ['list', 'create']
- apiGroups: ['']
resources: ['persistentvolumes']
verbs: ['create', 'delete', 'get', 'list', 'watch', 'update', 'patch']
- apiGroups: ['']
resources: ['secrets']
verbs: ['get', 'list']
- apiGroups: ['']
resources: ['pods']
verbs: ['get', 'list', 'watch']
- apiGroups: ['']
resources: ['persistentvolumeclaims']
verbs: ['get', 'list', 'watch', 'update', 'patch']
- apiGroups: ['']
resources: ['persistentvolumeclaims/status']
verbs: ['get', 'list', 'watch', 'update', 'patch']
- apiGroups: ['']
resources: ['nodes']
verbs: ['get', 'list', 'watch']
- apiGroups: ['storage.k8s.io']
resources: ['volumeattachments']
verbs: ['get', 'list', 'watch', 'update', 'patch']
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ['storage.k8s.io']
resources: ['storageclasses']
verbs: ['get', 'list', 'watch']
- apiGroups: ['csi.storage.k8s.io']
resources: ['csidrivers']
verbs: ['get', 'list', 'watch', 'update', 'create']
- apiGroups: ['']
resources: ['events']
verbs: ['list', 'watch', 'create', 'update', 'patch']
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ['snapshot.storage.k8s.io']
resources: ['volumesnapshots/status']
verbs: ["create", "get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["create", "get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["create", "get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
# capacity rbac
- apiGroups: ["storage.k8s.io"]
resources: ["csistoragecapacities"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get"]
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/node-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: democratic-csi-synology-iscsi-node-cr
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
rules:
# Allow listing and creating CRDs
- apiGroups: ['apiextensions.k8s.io']
resources: ['customresourcedefinitions']
verbs: ['list', 'create']
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/controller-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: democratic-csi-synology-iscsi-controller-rb
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: democratic-csi-synology-iscsi-controller-cr
subjects:
- kind: ServiceAccount
name: democratic-csi-synology-iscsi-controller-sa
namespace: democratic-csi-synology-iscsi
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/node-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: democratic-csi-synology-iscsi-node-rb
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: democratic-csi-synology-iscsi-node-cr
subjects:
- kind: ServiceAccount
name: democratic-csi-synology-iscsi-node-sa
namespace: democratic-csi-synology-iscsi
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/node.yaml
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: democratic-csi-synology-iscsi-node
namespace: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/csi-role: "node"
app.kubernetes.io/component: "node-linux"
spec:
selector:
matchLabels:
app.kubernetes.io/name: democratic-csi
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/csi-role: "node"
app.kubernetes.io/component: "node-linux"
template:
metadata:
annotations:
checksum/configmap: 263840c3436d67b6e25f68fabb84f358c3df828bc15d9ec327e733b38cabd1d7
labels:
app.kubernetes.io/name: democratic-csi
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/csi-role: "node"
app.kubernetes.io/component: "node-linux"
spec:
serviceAccount: democratic-csi-synology-iscsi-node-sa
priorityClassName: "system-node-critical"
# Required by iSCSI
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostAliases: []
# Required by multipath detach
hostIPC: true
hostPID: true
containers:
- name: csi-driver
image: "docker.io/democraticcsi/democratic-csi:latest"
args:
- --csi-version=1.5.0
- --csi-name=org.democratic-csi.iscsi-synology
- --driver-config-file=/config/driver-config-file.yaml
- --log-level=info
- --csi-mode=node
- --server-socket=/csi-data/csi.sock.internal
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- SYS_ADMIN
privileged: true
env:
- name: CSI_NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NODE_EXTRA_CA_CERTS
value: "/tmp/certs/extra-ca-certs.crt"
- name: ISCSIADM_HOST_STRATEGY
value: nsenter
- name: ISCSIADM_HOST_PATH
value: /usr/local/sbin/iscsiadm
# prevent crazy error messages due to the /dev host mount
terminationMessagePath: /tmp/termination-log
terminationMessagePolicy: File
livenessProbe:
failureThreshold: 3
exec:
command:
- bin/liveness-probe
- --csi-version=1.5.0
- --csi-address=/csi-data/csi.sock.internal
initialDelaySeconds: 10
timeoutSeconds: 15
periodSeconds: 60
volumeMounts:
- name: socket-dir
mountPath: /csi-data
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
- name: iscsi-dir
mountPath: /var/iscsi
mountPropagation: Bidirectional
- name: iscsi-info
mountPath: /var/lib/iscsi
mountPropagation: Bidirectional
- name: modules-dir
mountPath: /lib/modules
readOnly: true
- name: localtime
mountPath: /etc/localtime
readOnly: true
- name: udev-data
mountPath: /run/udev
- name: host-dir
mountPath: /host
mountPropagation: Bidirectional
- mountPath: /sys
name: sys-dir
- name: dev-dir
mountPath: /dev
- name: config
mountPath: /config
- name: extra-ca-certs
mountPath: /tmp/certs
- name: csi-proxy
image: "docker.io/democraticcsi/csi-grpc-proxy:v0.5.6"
env:
- name: BIND_TO
value: "unix:///csi-data/csi.sock"
- name: PROXY_TO
value: "unix:///csi-data/csi.sock.internal"
volumeMounts:
- mountPath: /csi-data
name: socket-dir
- name: driver-registrar
image: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.0"
args:
- --v=5
- --csi-address=/csi-data/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/org.democratic-csi.iscsi-synology/csi.sock
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=/var/lib/kubelet/plugins/org.democratic-csi.iscsi-synology/csi.sock
- --mode=kubelet-registration-probe
volumeMounts:
- mountPath: /csi-data
name: socket-dir
- name: registration-dir
mountPath: /registration
- name: kubelet-dir
mountPath: /var/lib/kubelet
- name: cleanup
image: "docker.io/busybox:1.37.0"
command:
- "/bin/sh"
- "-c"
- "--"
args: [ "while true; do sleep 2; done;" ]
lifecycle:
# note this runs *before* other containers are terminated
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /plugins/org.democratic-csi.iscsi-synology /registration/org.democratic-csi.iscsi-synology-reg.sock"]
volumeMounts:
- name: plugins-dir
mountPath: /plugins
- name: registration-dir
mountPath: /registration
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/org.democratic-csi.iscsi-synology
type: DirectoryOrCreate
- name: plugins-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: iscsi-dir
hostPath:
path: /var/iscsi
type:
- name: iscsi-info
hostPath:
path: /var/lib/iscsi
- name: dev-dir
hostPath:
path: /dev
type: Directory
- name: modules-dir
hostPath:
path: /lib/modules
- name: localtime
hostPath:
path: /etc/localtime
- name: udev-data
hostPath:
path: /run/udev
- name: sys-dir
hostPath:
path: /sys
type: Directory
- name: host-dir
hostPath:
path: /
type: Directory
- name: config
secret:
secretName: synology-iscsi-config-secret
- name: extra-ca-certs
configMap:
name: democratic-csi-synology-iscsi
items:
- key: extra-ca-certs
path: extra-ca-certs.crt
nodeSelector:
kubernetes.io/os: linux
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/controller.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: democratic-csi-synology-iscsi-controller
namespace: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/csi-role: "controller"
app.kubernetes.io/component: "controller-linux"
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: democratic-csi
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/csi-role: "controller"
app.kubernetes.io/component: "controller-linux"
template:
metadata:
annotations:
checksum/configmap: 263840c3436d67b6e25f68fabb84f358c3df828bc15d9ec327e733b38cabd1d7
labels:
app.kubernetes.io/name: democratic-csi
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/csi-role: "controller"
app.kubernetes.io/component: "controller-linux"
spec:
serviceAccount: democratic-csi-synology-iscsi-controller-sa
priorityClassName: "system-cluster-critical"
hostNetwork: false
dnsPolicy: ClusterFirst
hostAliases: []
hostIPC: false
containers:
# https://github.com/kubernetes-csi/external-attacher
- name: external-attacher
image: "registry.k8s.io/sig-storage/csi-attacher:v4.4.0"
args:
- --v=5
- --leader-election
- --leader-election-namespace=democratic-csi-synology-iscsi
- --timeout=90s
- --worker-threads=10
- --csi-address=/csi-data/csi.sock
volumeMounts:
- mountPath: /csi-data
name: socket-dir
# https://github.com/kubernetes-csi/external-provisioner
- name: external-provisioner
image: "registry.k8s.io/sig-storage/csi-provisioner:v3.6.0"
args:
- --v=5
- --leader-election
- --leader-election-namespace=democratic-csi-synology-iscsi
- --timeout=90s
- --worker-threads=10
- --extra-create-metadata
- --csi-address=/csi-data/csi.sock
volumeMounts:
- mountPath: /csi-data
name: socket-dir
env:
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
# https://github.com/kubernetes-csi/external-resizer
- name: external-resizer
image: "registry.k8s.io/sig-storage/csi-resizer:v1.9.0"
args:
- --v=5
- --leader-election
- --leader-election-namespace=democratic-csi-synology-iscsi
- --timeout=90s
- --workers=10
- --csi-address=/csi-data/csi.sock
volumeMounts:
- mountPath: /csi-data
name: socket-dir
env:
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
# https://github.com/kubernetes-csi/external-snapshotter
# beware upgrading version:
# - https://github.com/rook/rook/issues/4178
# - https://github.com/kubernetes-csi/external-snapshotter/issues/147#issuecomment-513664310
- name: external-snapshotter
image: "registry.k8s.io/sig-storage/csi-snapshotter:v8.2.1"
args:
- --v=5
- --leader-election
- --leader-election-namespace=democratic-csi-synology-iscsi
- --timeout=90s
- --worker-threads=10
- --csi-address=/csi-data/csi.sock
volumeMounts:
- mountPath: /csi-data
name: socket-dir
env:
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: csi-driver
image: "docker.io/democraticcsi/democratic-csi:latest"
args:
- --csi-version=1.5.0
- --csi-name=org.democratic-csi.iscsi-synology
- --driver-config-file=/config/driver-config-file.yaml
- --log-level=info
- --csi-mode=controller
- --server-socket=/csi-data/csi.sock.internal
env:
- name: NODE_EXTRA_CA_CERTS
value: "/tmp/certs/extra-ca-certs.crt"
livenessProbe:
failureThreshold: 3
exec:
command:
- bin/liveness-probe
- --csi-version=1.5.0
- --csi-address=/csi-data/csi.sock.internal
initialDelaySeconds: 10
timeoutSeconds: 15
periodSeconds: 60
volumeMounts:
- name: socket-dir
mountPath: /csi-data
- name: config
mountPath: /config
- name: extra-ca-certs
mountPath: /tmp/certs
- name: csi-proxy
image: "docker.io/democraticcsi/csi-grpc-proxy:v0.5.6"
env:
- name: BIND_TO
value: "unix:///csi-data/csi.sock"
- name: PROXY_TO
value: "unix:///csi-data/csi.sock.internal"
volumeMounts:
- mountPath: /csi-data
name: socket-dir
volumes:
- name: socket-dir
emptyDir: {}
- name: config
secret:
secretName: synology-iscsi-config-secret
- name: extra-ca-certs
configMap:
name: democratic-csi-synology-iscsi
items:
- key: extra-ca-certs
path: extra-ca-certs.crt
nodeSelector:
kubernetes.io/os: linux
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/required.yaml
# 199b143b7f9f4df4dc97d9410c2fbe7aadb38e42729f08d92d12db1af0863fdf
# 1f4dc096d58f7d21e3875671aee6f29b120ab84218fa47db2cb53bc9eb5b4dac
# 9d8b3506156467be4bcf723a74d85e92d6ff851508e112fadfae94fd3a57e699
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/snapshot-classes.yaml
# this loop is deeply connected to the loop for secret parameter settings above
---
# Source: democratic-csi-synology-iscsi/charts/democratic-csi/templates/driver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: org.democratic-csi.iscsi-synology
labels:
app.kubernetes.io/name: democratic-csi
helm.sh/chart: democratic-csi-0.15.0
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/managed-by: Helm
spec:
attachRequired: true
podInfoOnMount: true
# https://kubernetes.io/blog/2020/12/14/kubernetes-release-1.20-fsgroupchangepolicy-fsgrouppolicy/
# added in Kubernetes 1.16
# volumeLifecycleModes:
# - Persistent
# - Ephemeral
---
# Source: democratic-csi-synology-iscsi/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: synology-iscsi-config-secret
namespace: democratic-csi-synology-iscsi
labels:
app.kubernetes.io/name: synology-iscsi-config-secret
app.kubernetes.io/instance: democratic-csi-synology-iscsi
app.kubernetes.io/part-of: democratic-csi-synology-iscsi
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: driver-config-file.yaml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/democratic-csi-synology-iscsi/config
metadataPolicy: None
property: driver-config-file.yaml


@@ -1,247 +0,0 @@
---
# Source: descheduler/charts/descheduler/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: descheduler
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
---
# Source: descheduler/charts/descheduler/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: descheduler
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: default
pluginConfig:
- args:
evictDaemonSetPods: false
evictLocalStoragePods: false
ignorePvcPods: true
name: DefaultEvictor
- name: RemoveDuplicates
- args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
name: RemovePodsViolatingNodeAffinity
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- args:
targetThresholds:
cpu: 60
memory: 60
pods: 60
thresholds:
cpu: 20
memory: 20
pods: 20
name: LowNodeUtilization
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity
---
# Source: descheduler/charts/descheduler/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
---
# Source: descheduler/charts/descheduler/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: descheduler
subjects:
- kind: ServiceAccount
name: descheduler
namespace: descheduler
---
# Source: descheduler/charts/descheduler/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
name: descheduler
namespace: descheduler
spec:
clusterIP: None
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
type: ClusterIP
---
# Source: descheduler/charts/descheduler/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: descheduler
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
template:
metadata:
labels:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
annotations:
checksum/config: 827e11ad319ee1e4c515e25bf575e74c44a0a9fdac5317e6caf8798b1d282036
spec:
priorityClassName: system-cluster-critical
serviceAccountName: descheduler
containers:
- name: descheduler
image: "registry.k8s.io/descheduler/descheduler:v0.34.0"
imagePullPolicy: IfNotPresent
command:
- /bin/descheduler
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval=5m
- --v=3
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 20
timeoutSeconds: 5
resources:
limits:
cpu: 500m
memory: 256Mi
requests:
cpu: 10m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: descheduler
---
# Source: descheduler/charts/descheduler/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: descheduler-servicemonitor
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- descheduler
selector:
matchLabels:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
endpoints:
- honorLabels: true
port: http-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,258 +0,0 @@
---
# Source: element-web/charts/element-web/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
---
# Source: element-web/charts/element-web/templates/configuration-nginx.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: element-web-nginx
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 8080;
listen [::]:8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html;
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Content-Security-Policy "frame-ancestors 'self'";
# Set no-cache for the index.html only so that browsers always check for a new copy of Element Web.
location = /index.html {
add_header Cache-Control "no-cache";
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
}
---
# Source: element-web/charts/element-web/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
data:
config.json: |
{"brand":"Alex Lebens","branding":{"auth_header_logo_url":"https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png","welcome_background_url":"https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg"},"default_country_code":"US","default_server_config":{"m.homeserver":{"base_url":"https://matrix.alexlebens.dev","server_name":"alexlebens.dev"},"m.identity_server":{"base_url":"https://alexlebens.dev"}},"default_theme":"dark","disable_3pid_login":true,"sso_redirect_options":{"immediate":true}}
---
# Source: element-web/charts/element-web/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
---
# Source: element-web/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: element-web-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: element-web
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.1
namespace: element-web
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: element-web
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: element-web
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: element-web-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: element-web/charts/element-web/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
template:
metadata:
annotations:
checksum/config: 07dfcbf8ec12efac4a1e2bcc3779031a8c985138f9a5da5b419223c06b1b4675
checksum/config-nginx: 75e21c87909fe90a77679c788366b70868068fde41c3d7a263955b35829d424b
labels:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
spec:
serviceAccountName: element-web
securityContext:
{}
containers:
- name: element-web
securityContext:
{}
image: "vectorim/element-web:v1.12.6"
imagePullPolicy: IfNotPresent
env:
- name: ELEMENT_WEB_PORT
value: '8080'
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config.json
name: config
subPath: config.json
            - mountPath: /etc/nginx/conf.d/default.conf
              name: config-nginx
              subPath: default.conf
volumes:
- name: config
configMap:
name: element-web
- name: config-nginx
configMap:
name: element-web-nginx
---
# Source: element-web/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: element-web-cloudflared-secret
namespace: element-web
labels:
app.kubernetes.io/name: element-web-cloudflared-secret
app.kubernetes.io/instance: element-web
app.kubernetes.io/part-of: element-web
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/element
metadataPolicy: None
property: token
---
# Source: element-web/charts/element-web/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "element-web-test-connection"
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: element-web
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['element-web:80']
restartPolicy: Never


@@ -1,360 +0,0 @@
---
# Source: ephemera/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: ephemera-import-nfs-storage
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-import-nfs-storage
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books Import
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: ephemera/charts/ephemera/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ephemera
labels:
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
helm.sh/chart: ephemera-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: ephemera
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: ephemera/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ephemera-import-nfs-storage
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-import-nfs-storage
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
volumeName: ephemera-import-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: ephemera/charts/ephemera/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: ephemera
labels:
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
app.kubernetes.io/service: ephemera
helm.sh/chart: ephemera-4.4.0
namespace: ephemera
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8286
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/name: ephemera
---
# Source: ephemera/charts/ephemera/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ephemera
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
helm.sh/chart: ephemera-4.4.0
namespace: ephemera
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: ephemera
app.kubernetes.io/instance: ephemera
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/name: ephemera
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: APPRISE_STORAGE_MODE
value: memory
- name: APPRISE_STATEFUL_MODE
value: disabled
- name: APPRISE_WORKER_COUNT
value: "1"
- name: APPRISE_STATELESS_URLS
valueFrom:
secretKeyRef:
key: ntfy-url
name: ephemera-apprise-config
image: caronc/apprise:1.2.6
imagePullPolicy: IfNotPresent
name: apprise-api
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: LOG_LEVEL
value: info
- name: LOG_HTML
value: "false"
- name: CAPTCHA_SOLVER
value: none
- name: TZ
value: America/Chicago
image: ghcr.io/flaresolverr/flaresolverr:v3.4.6
imagePullPolicy: IfNotPresent
name: flaresolverr
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: AA_BASE_URL
value: https://annas-archive.org
- name: FLARESOLVERR_URL
value: http://127.0.0.1:8191
- name: LG_BASE_URL
value: https://gen.com
- name: PUID
value: "0"
- name: PGID
value: "0"
image: ghcr.io/orwellianepilogue/ephemera:1.3.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /app/downloads
name: cache
- mountPath: /app/data
name: config
- mountPath: /app/ingest
name: ingest
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: ephemera
- name: ingest
persistentVolumeClaim:
claimName: ephemera-import-nfs-storage
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-key-secret
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-key-secret
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/ephemera/config
metadataPolicy: None
property: key
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-apprise-config
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-apprise-config
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/ephemera/config
metadataPolicy: None
property: ntfy-url
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-config-backup-secret
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-config-backup-secret
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/ephemera/ephemera-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: ephemera/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-ephemera
namespace: ephemera
labels:
app.kubernetes.io/name: http-route-ephemera
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ephemera.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: ephemera
port: 80
weight: 100
---
# Source: ephemera/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: ephemera-config-backup-source
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-config-backup-source
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
sourcePVC: ephemera-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: ephemera-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi


@@ -1,608 +0,0 @@
---
# Source: eraser/charts/eraser/templates/eraser-controller-manager-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-controller-manager
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/eraser-imagejob-pods-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-imagejob-pods
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: eraser-manager-config
namespace: "eraser"
data:
controller_manager_config.yaml: |
apiVersion: eraser.sh/v1alpha3
components:
collector:
enabled: true
image:
tag: v1.4.1
limit: {}
request:
cpu: 10m
memory: 128Mi
remover:
image:
tag: v1.4.1
limit: {}
request:
cpu: 10m
memory: 128Mi
scanner:
config: ""
enabled: false
image:
tag: v1.4.1
limit: {}
request:
cpu: 100m
memory: 128Mi
health: {}
kind: EraserConfig
leaderElection: {}
manager:
additionalPodLabels: {}
imageJob:
cleanup:
delayOnFailure: 24h
delayOnSuccess: 0s
successRatio: 1
logLevel: info
nodeFilter:
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
type: exclude
otlpEndpoint: ""
priorityClassName: ""
profile:
enabled: false
port: 6060
pullSecrets: []
runtime:
address: unix:///run/containerd/containerd.sock
name: containerd
scheduling:
beginImmediately: true
repeatInterval: 24h
metrics: {}
webhook: {}
---
# Source: eraser/charts/eraser/templates/imagejobs.eraser.sh-customresourcedefinition.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: imagejobs.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageJob
listKind: ImageJobList
plural: imagejobs
singular: imagejob
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
# Source: eraser/charts/eraser/templates/imagelists.eraser.sh-customresourcedefinition.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: imagelists.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageList
listKind: ImageListList
plural: imagelists
singular: imagelist
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
# Source: eraser/charts/eraser/templates/eraser-manager-role-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs/status
verbs:
- get
- patch
- update
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
- patch
- update
---
# Source: eraser/charts/eraser/templates/eraser-manager-rolebinding-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/eraser-manager-role-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-role
namespace: 'eraser'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- podtemplates
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
# Source: eraser/charts/eraser/templates/eraser-manager-rolebinding-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-rolebinding
namespace: 'eraser'
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/eraser-controller-manager-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
control-plane: controller-manager
helm.sh/chart: 'eraser'
name: eraser-controller-manager
namespace: 'eraser'
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
control-plane: controller-manager
helm.sh/chart: 'eraser'
template:
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
control-plane: controller-manager
helm.sh/chart: 'eraser'
spec:
affinity:
{}
containers:
- args:
- --config=/config/controller_manager_config.yaml
command:
- /manager
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: OTEL_SERVICE_NAME
value: eraser-manager
image: 'ghcr.io/eraser-dev/eraser-manager:v1.4.1'
imagePullPolicy: 'IfNotPresent'
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
memory: 30Mi
requests:
cpu: 10m
memory: 30Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /config
name: eraser-manager-config
nodeSelector:
kubernetes.io/os: linux
priorityClassName: ''
serviceAccountName: eraser-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
[]
volumes:
- configMap:
name: eraser-manager-config
name: eraser-manager-config

@@ -1,518 +0,0 @@
---
# Source: external-dns/charts/external-dns-unifi/crds/dnsendpoints.externaldns.k8s.io.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/external-dns/pull/2007
name: dnsendpoints.externaldns.k8s.io
spec:
group: externaldns.k8s.io
names:
kind: DNSEndpoint
listKind: DNSEndpointList
plural: dnsendpoints
singular: dnsendpoint
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
DNSEndpoint is a contract that a user-specified CRD must implement to be used as a source for external-dns.
The user-specified CRD should also have the status sub-resource.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: DNSEndpointSpec defines the desired state of DNSEndpoint
properties:
endpoints:
items:
description: Endpoint is a high-level way of a connection between a service and an IP
properties:
dnsName:
description: The hostname of the DNS record
type: string
labels:
additionalProperties:
type: string
description: Labels stores labels defined for the Endpoint
type: object
providerSpecific:
description: ProviderSpecific stores provider specific config
items:
description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers
properties:
name:
type: string
value:
type: string
type: object
type: array
recordTTL:
description: TTL for the record
format: int64
type: integer
recordType:
description: RecordType type of record, e.g. CNAME, A, AAAA, SRV, TXT etc
type: string
setIdentifier:
description: Identifier to distinguish multiple records with the same name and type (e.g. Route53 records with routing policies other than 'simple')
type: string
targets:
description: The targets the DNS record points to
items:
type: string
type: array
type: object
type: array
type: object
status:
description: DNSEndpointStatus defines the observed state of DNSEndpoint
properties:
observedGeneration:
description: The generation observed by the external-dns controller.
format: int64
type: integer
type: object
type: object
served: true
storage: true
subresources:
status: {}
---
# Source: external-dns/charts/external-dns-unifi/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: true
---
# Source: external-dns/charts/external-dns-unifi/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns-unifi
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: ["externaldns.k8s.io"]
resources: ["dnsendpoints"]
verbs: ["get","watch","list"]
- apiGroups: ["externaldns.k8s.io"]
resources: ["dnsendpoints/status"]
verbs: ["*"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get","watch","list"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["httproutes"]
verbs: ["get","watch","list"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["tlsroutes"]
verbs: ["get","watch","list"]
---
# Source: external-dns/charts/external-dns-unifi/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-unifi-viewer
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns-unifi
subjects:
- kind: ServiceAccount
name: external-dns-unifi
namespace: external-dns
---
# Source: external-dns/charts/external-dns-unifi/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
ports:
- name: http
port: 7979
targetPort: http
protocol: TCP
- name: http-webhook
port: 8080
targetPort: http-webhook
protocol: TCP
---
# Source: external-dns/charts/external-dns-unifi/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
spec:
automountServiceAccountToken: true
serviceAccountName: external-dns-unifi
securityContext:
fsGroup: 65534
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: external-dns
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
image: registry.k8s.io/external-dns/external-dns:v0.19.0
imagePullPolicy: IfNotPresent
args:
- --log-level=info
- --log-format=text
- --interval=1m
- --source=ingress
- --source=crd
- --source=gateway-httproute
- --source=gateway-tlsroute
- --policy=sync
- --registry=txt
- --txt-owner-id=default
- --txt-prefix=k8s.
- --domain-filter=alexlebens.net
- --provider=webhook
- --ignore-ingress-tls-spec
ports:
- name: http
protocol: TCP
containerPort: 7979
livenessProbe:
failureThreshold: 2
httpGet:
path: /healthz
port: http
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: http
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
- name: webhook
image: ghcr.io/kashalls/external-dns-unifi-webhook:v0.7.0
imagePullPolicy: IfNotPresent
env:
- name: UNIFI_HOST
value: https://192.168.1.1
- name: UNIFI_API_KEY
valueFrom:
secretKeyRef:
key: api-key
name: external-dns-unifi-secret
- name: LOG_LEVEL
value: debug
ports:
- name: http-webhook
protocol: TCP
containerPort: 8080
livenessProbe:
failureThreshold: 2
httpGet:
path: /healthz
port: http-webhook
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
httpGet:
path: /readyz
port: http-webhook
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: external-device-names
namespace: external-dns
labels:
app.kubernetes.io/name: external-device-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Unifi UDM
- dnsName: unifi.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 192.168.1.1
# Synology Web
- dnsName: synology.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.61
# Synology Storage
- dnsName: synologybond.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.64
# HD Homerun
- dnsName: hdhr.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.72
# Pi KVM
- dnsName: pikvm.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.71
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: iot-device-names
namespace: external-dns
labels:
app.kubernetes.io/name: iot-device-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Airgradient
- dnsName: it01ag.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.83
# Phillips Hue
- dnsName: it02ph.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.85
# TubesZB ZigBee
- dnsName: it03tb.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.81
# TubesZB Z-Wave
- dnsName: it04tb.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.82
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: server-host-names
namespace: external-dns
labels:
app.kubernetes.io/name: server-host-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Unifi Gateway
- dnsName: nw01un.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 192.168.1.1
# Synology
- dnsName: ps02sn.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.61
# Synology Storage
- dnsName: ps02sn-bond.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.64
# Raspberry Pi
- dnsName: ps08rp.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.51
# Raspberry Pi
- dnsName: ps09rp.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.52
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: cluster-service-names
namespace: external-dns
labels:
app.kubernetes.io/name: cluster-service-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
  # Traefik Proxy
- dnsName: traefik-cl01tl.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.21
  # Blocky
- dnsName: blocky.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.22
  # Plex
- dnsName: plex.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.23
---
# Source: external-dns/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: external-dns-unifi-secret
namespace: external-dns
labels:
app.kubernetes.io/name: external-dns-unifi-secret
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: api-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth/cl01tl
metadataPolicy: None
property: api-key
---
# Source: external-dns/charts/external-dns-unifi/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: app.kubernetes.io/instance
namespaceSelector:
matchNames:
- external-dns
selector:
matchLabels:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
endpoints:
- port: http
path: /metrics
- port: http-webhook
path: /metrics

File diff suppressed because it is too large

File diff suppressed because it is too large
@@ -1,430 +0,0 @@
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: garage
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
namespace: garage
data:
garage.toml: |
replication_factor = 1
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"
compression_level = 3
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901"
allow_world_readable_secrets = false
[s3_api]
s3_region = "us-east-1"
api_bind_addr = "[::]:3900"
root_domain = ".garage-s3.alexlebens.net"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".garage-s3.alexlebens.net"
[admin]
api_bind_addr = "[::]:3903"
metrics_require_token = true
---
# Source: garage/charts/garage/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garage-data
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: garage
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "800Gi"
storageClassName: "synology-iscsi-delete"
---
# Source: garage/charts/garage/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garage-db
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: garage
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: garage/charts/garage/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garage-snapshots
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: garage
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "synology-iscsi-delete"
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-main
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
app.kubernetes.io/service: garage-main
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
type: ClusterIP
ports:
- port: 3903
targetPort: 3903
protocol: TCP
name: admin
- port: 3901
targetPort: 3901
protocol: TCP
name: rpc
- port: 3900
targetPort: 3900
protocol: TCP
name: s3
- port: 3902
targetPort: 3902
protocol: TCP
name: web
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-webui
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
app.kubernetes.io/service: garage-webui
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
type: ClusterIP
ports:
- port: 3909
targetPort: 3909
protocol: TCP
name: webui
selector:
app.kubernetes.io/controller: webui
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: garage-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
template:
metadata:
annotations:
checksum/configMaps: aecb65cb46684688a356974d7ecaec4abb2d4fed3f71863780e7f35505c7af02
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- envFrom:
- secretRef:
name: garage-token-secret
image: dxflrs/garage:v2.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /etc/garage.toml
mountPropagation: None
name: config
readOnly: true
subPath: garage.toml
- mountPath: /var/lib/garage/data
name: data
- mountPath: /var/lib/garage/meta
name: db
- mountPath: /var/lib/garage/snapshots
name: snapshots
volumes:
- configMap:
name: garage
name: config
- name: data
persistentVolumeClaim:
claimName: garage-data
- name: db
persistentVolumeClaim:
claimName: garage-db
- name: snapshots
persistentVolumeClaim:
claimName: garage-snapshots
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: garage-webui
labels:
app.kubernetes.io/controller: webui
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: webui
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
template:
metadata:
annotations:
checksum/configMaps: aecb65cb46684688a356974d7ecaec4abb2d4fed3f71863780e7f35505c7af02
labels:
app.kubernetes.io/controller: webui
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: API_BASE_URL
value: http://garage-main.garage:3903
- name: S3_ENDPOINT_URL
value: http://garage-main.garage:3900
- name: API_ADMIN_KEY
valueFrom:
secretKeyRef:
key: GARAGE_ADMIN_TOKEN
name: garage-token-secret
image: khairul169/garage-webui:1.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /etc/garage.toml
mountPropagation: None
name: config
readOnly: true
subPath: garage.toml
volumes:
- configMap:
name: garage
name: config
---
# Source: garage/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: garage-token-secret
namespace: garage
labels:
app.kubernetes.io/name: garage-token-secret
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: GARAGE_RPC_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/garage/token
metadataPolicy: None
property: rpc
- secretKey: GARAGE_ADMIN_TOKEN
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/garage/token
metadataPolicy: None
property: admin
- secretKey: GARAGE_METRICS_TOKEN
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/garage/token
metadataPolicy: None
property: metric
---
# Source: garage/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-garage-webui
namespace: garage
labels:
app.kubernetes.io/name: http-route-garage-webui
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- garage-webui.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: garage-webui
port: 3909
weight: 100
---
# Source: garage/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-garage-s3
namespace: garage
labels:
app.kubernetes.io/name: http-route-garage-s3
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- garage-s3.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: garage-main
port: 3900
weight: 100
---
# Source: garage/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: garage
namespace: garage
labels:
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
selector:
matchLabels:
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
endpoints:
- port: admin
interval: 1m
scrapeTimeout: 30s
path: /metrics
bearerTokenSecret:
name: garage-token-secret
key: GARAGE_METRICS_TOKEN

File diff suppressed because it is too large
@@ -1,163 +0,0 @@
---
# Source: generic-device-plugin/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/part-of: generic-device-plugin
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/version: 0.20.3
helm.sh/chart: generic-device-plugin-0.20.3
namespace: generic-device-plugin
data:
config.yaml: |
devices:
- name: tun
groups:
- count: 1000
paths:
- path: /dev/net/tun
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/service: generic-device-plugin
app.kubernetes.io/version: 0.20.3
helm.sh/chart: generic-device-plugin-0.20.3
namespace: generic-device-plugin
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/name: generic-device-plugin
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/version: 0.20.3
helm.sh/chart: generic-device-plugin-0.20.3
namespace: generic-device-plugin
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/instance: generic-device-plugin
template:
metadata:
annotations:
checksum/configMaps: 473a15a17751b0c136528e129767f6ed0871ca7522e5a6ccd90d041808571e81
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/name: generic-device-plugin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
priorityClassName: system-node-critical
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- args:
- --config=/config/config.yaml
env:
- name: LISTEN
value: :8080
- name: PLUGIN_DIRECTORY
value: /var/lib/kubelet/device-plugins
- name: DOMAIN
value: devic.es
image: ghcr.io/squat/generic-device-plugin:latest@sha256:4896ffd516624d6eb7572e102bc4397e91f8bc3b2fb38b5bfefd758baae3dcf2
imagePullPolicy: Always
name: main
securityContext:
privileged: true
volumeMounts:
- mountPath: /config
name: config
- mountPath: /dev
name: dev
- mountPath: /var/lib/kubelet/device-plugins
name: device-plugins
volumes:
- configMap:
            name: generic-device-plugin
name: config
- hostPath:
path: /dev
name: dev
- hostPath:
path: /var/lib/kubelet/device-plugins
name: device-plugins
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/version: 0.20.3
helm.sh/chart: generic-device-plugin-0.20.3
namespace: generic-device-plugin
spec:
jobLabel: "generic-device-plugin"
namespaceSelector:
matchNames:
- generic-device-plugin
selector:
matchLabels:
app.kubernetes.io/service: generic-device-plugin
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/instance: generic-device-plugin
endpoints:
- interval: 30s
path: /metrics
port: http
scheme: http
scrapeTimeout: 10s

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large
@@ -1,308 +0,0 @@
---
# Source: headlamp/charts/headlamp/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: headlamp
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
---
# Source: headlamp/templates/service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: headlamp-admin
namespace: headlamp
labels:
app.kubernetes.io/name: headlamp-admin
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
---
# Source: headlamp/charts/headlamp/templates/plugin-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: headlamp-plugin-config
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
data:
plugin.yml: |
plugins:
- name: cert-manager
source: https://artifacthub.io/packages/headlamp/headlamp-plugins/headlamp_cert-manager
version: 0.1.0
- name: trivy
source: https://artifacthub.io/packages/headlamp/headlamp-trivy/headlamp_trivy
version: 0.3.1
installOptions:
parallel: true
maxConcurrent: 2
---
# Source: headlamp/charts/headlamp/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: headlamp-admin
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: headlamp
namespace: headlamp
---
# Source: headlamp/templates/cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-admin-oidc
namespace: headlamp
labels:
app.kubernetes.io/name: cluster-admin-oidc
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: User
name: alexanderlebens@gmail.com
apiGroup: rbac.authorization.k8s.io
- kind: ServiceAccount
name: headlamp-admin
namespace: headlamp
---
# Source: headlamp/charts/headlamp/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: headlamp
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
---
# Source: headlamp/charts/headlamp/templates/deployment.yaml
# This block of code is used to extract the values from the env.
# This is done to check if the values are non-empty and if they are, they are used in the deployment.yaml.
apiVersion: apps/v1
kind: Deployment
metadata:
name: headlamp
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
template:
metadata:
labels:
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
spec:
serviceAccountName: headlamp
automountServiceAccountToken: true
securityContext:
{}
containers:
- name: headlamp
securityContext:
privileged: false
runAsGroup: 101
runAsNonRoot: true
runAsUser: 100
image: "ghcr.io/headlamp-k8s/headlamp:v0.38.0"
imagePullPolicy: IfNotPresent
# Check if externalSecret is enabled
envFrom:
- secretRef:
name: headlamp-oidc-secret
args:
- "-in-cluster"
- "-watch-plugins-changes"
- "-plugins-dir=/headlamp/plugins"
- "-oidc-client-id=$(OIDC_CLIENT_ID)"
- "-oidc-client-secret=$(OIDC_CLIENT_SECRET)"
- "-oidc-idp-issuer-url=$(OIDC_ISSUER_URL)"
- "-oidc-scopes=$(OIDC_SCOPES)"
ports:
- name: http
containerPort: 4466
protocol: TCP
livenessProbe:
httpGet:
path: "/"
port: http
readinessProbe:
httpGet:
path: "/"
port: http
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: plugins-dir
mountPath: /headlamp/plugins
- name: headlamp-plugin
image: node:lts-alpine
command: ["/bin/sh", "-c"]
args:
- |
if [ -f "/config/plugin.yml" ]; then
echo "Installing plugins from config..."
cat /config/plugin.yml
# Use a writable cache directory
export NPM_CONFIG_CACHE=/tmp/npm-cache
# Use a writable config directory
export NPM_CONFIG_USERCONFIG=/tmp/npm-userconfig
mkdir -p /tmp/npm-cache /tmp/npm-userconfig
npx --yes @headlamp-k8s/pluginctl@latest install --config /config/plugin.yml --folderName /headlamp/plugins --watch
fi
volumeMounts:
- name: plugins-dir
mountPath: /headlamp/plugins
- name: plugin-config
mountPath: /config
resources:
null
securityContext:
readOnlyRootFilesystem: false
runAsNonRoot: false
runAsUser: 0
volumes:
- name: plugins-dir
emptyDir: {}
- name: plugin-config
configMap:
name: headlamp-plugin-config
---
# Source: headlamp/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: headlamp-oidc-secret
namespace: headlamp
labels:
app.kubernetes.io/name: headlamp-oidc-secret
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: secret
- secretKey: OIDC_ISSUER_URL
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: issuer
- secretKey: OIDC_SCOPES
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: scopes
- secretKey: OIDC_VALIDATOR_ISSUER_URL
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: validator-issuer-url
- secretKey: OIDC_VALIDATOR_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: validator-client-id
---
# Source: headlamp/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: https-route-headlamp
namespace: headlamp
labels:
app.kubernetes.io/name: https-route-headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- headlamp.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: headlamp
port: 80
weight: 100

@@ -1,283 +0,0 @@
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: home-assistant-config
labels:
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: home-assistant-main
labels:
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
app.kubernetes.io/service: home-assistant-main
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8123
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/name: home-assistant
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: home-assistant-code-server
labels:
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
app.kubernetes.io/service: home-assistant-code-server
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
type: ClusterIP
ports:
- port: 8443
targetPort: 8443
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/name: home-assistant
---
# Source: home-assistant/charts/home-assistant/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: home-assistant
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: home-assistant
helm.sh/chart: home-assistant-4.4.0
namespace: home-assistant
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: home-assistant
app.kubernetes.io/instance: home-assistant
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/name: home-assistant
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: DEFAULT_WORKSPACE
value: /config
envFrom:
- secretRef:
name: home-assistant-code-server-password-secret
image: ghcr.io/linuxserver/code-server:4.106.3@sha256:aab9520fe923b2d93dccc2c806f3dc60649c2f4a2847fcd40c942227d0f1ae8f
imagePullPolicy: IfNotPresent
name: code-server
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config/home-assistant
name: config
- env:
- name: TZ
value: US/Central
image: ghcr.io/home-assistant/home-assistant:2025.12.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: home-assistant-config
---
# Source: home-assistant/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: home-assistant-code-server-password-secret
namespace: home-assistant
labels:
app.kubernetes.io/name: home-assistant-code-server-password-secret
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/home-assistant/code-server/auth
metadataPolicy: None
property: PASSWORD
- secretKey: SUDO_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/home-assistant/code-server/auth
metadataPolicy: None
property: SUDO_PASSWORD
---
# Source: home-assistant/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: home-assistant-token-secret
namespace: home-assistant
labels:
app.kubernetes.io/name: home-assistant-token-secret
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: bearer-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/home-assistant/auth
metadataPolicy: None
property: bearer-token
---
# Source: home-assistant/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-home-assistant
namespace: home-assistant
labels:
app.kubernetes.io/name: http-route-home-assistant
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- home-assistant.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: home-assistant-main
port: 80
weight: 100
---
# Source: home-assistant/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-home-assistant-code-server
namespace: home-assistant
labels:
app.kubernetes.io/name: http-route-home-assistant-code-server
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- home-assistant-code-server.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: home-assistant-code-server
port: 8443
weight: 100
---
# Source: home-assistant/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: home-assistant
namespace: home-assistant
labels:
app.kubernetes.io/name: home-assistant
app.kubernetes.io/instance: home-assistant
app.kubernetes.io/part-of: home-assistant
spec:
selector:
matchLabels:
app.kubernetes.io/name: home-assistant
app.kubernetes.io/service: home-assistant-main
app.kubernetes.io/instance: home-assistant
endpoints:
- port: http
interval: 3m
scrapeTimeout: 1m
path: /api/prometheus
bearerTokenSecret:
name: home-assistant-token-secret
key: bearer-token

@@ -1,307 +0,0 @@
---
# Source: homepage/charts/homepage/templates/common.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: homepage-dev
labels:
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: homepage
helm.sh/chart: homepage-4.4.0
namespace: homepage-dev
data:
bookmarks.yaml: ""
docker.yaml: ""
kubernetes.yaml: ""
services.yaml: |
- Applications:
- Auth:
icon: sh-authentik.webp
description: Authentik
href: https://auth.alexlebens.dev
siteMonitor: https://auth.alexlebens.dev
statusStyle: dot
- Gitea:
icon: sh-gitea.webp
description: Gitea
href: https://gitea.alexlebens.dev
siteMonitor: https://gitea.alexlebens.dev
statusStyle: dot
- Code:
icon: sh-visual-studio-code.webp
description: VS Code
href: https://codeserver.alexlebens.dev
siteMonitor: https://codeserver.alexlebens.dev
statusStyle: dot
- Site:
icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png
description: Profile Website
href: https://www.alexlebens.dev
siteMonitor: https://www.alexlebens.dev
statusStyle: dot
- Content Management:
icon: directus.png
description: Directus
href: https://directus.alexlebens.dev
siteMonitor: https://directus.alexlebens.dev
statusStyle: dot
- Social Media Management:
icon: sh-postiz.webp
description: Postiz
href: https://postiz.alexlebens.dev
siteMonitor: https://postiz.alexlebens.dev
statusStyle: dot
- Chat:
icon: sh-element.webp
description: Matrix
href: https://chat.alexlebens.dev
siteMonitor: https://chat.alexlebens.dev
statusStyle: dot
- Wiki:
icon: sh-outline.webp
description: Outline
href: https://wiki.alexlebens.dev
siteMonitor: https://wiki.alexlebens.dev
statusStyle: dot
- Passwords:
icon: sh-vaultwarden-light.webp
description: Vaultwarden
href: https://passwords.alexlebens.dev
siteMonitor: https://passwords.alexlebens.dev
statusStyle: dot
- Bookmarks:
icon: sh-karakeep-light.webp
description: Karakeep
href: https://karakeep.alexlebens.dev
siteMonitor: https://karakeep.alexlebens.dev
statusStyle: dot
- RSS:
icon: sh-freshrss.webp
description: FreshRSS
href: https://rss.alexlebens.dev
siteMonitor: https://rss.alexlebens.dev
statusStyle: dot
settings.yaml: |
favicon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.svg
headerStyle: clean
hideVersion: true
color: zinc
background:
image: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg
brightness: 50
theme: dark
disableCollapse: true
widgets.yaml: |
- logo:
icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png
- datetime:
text_size: xl
format:
dateStyle: long
timeStyle: short
hour12: false
- openmeteo:
label: St. Paul
latitude: 44.954445
longitude: -93.091301
timezone: America/Chicago
units: metric
cache: 5
format:
maximumFractionDigits: 0
---
# Source: homepage/charts/homepage/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: homepage-dev
labels:
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: homepage
app.kubernetes.io/service: homepage-dev
helm.sh/chart: homepage-4.4.0
namespace: homepage-dev
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/name: homepage
---
# Source: homepage/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage-dev-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.1
namespace: homepage-dev
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: homepage-dev
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: homepage-dev-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: homepage/charts/homepage/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage-dev
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: homepage
helm.sh/chart: homepage-4.4.0
annotations:
reloader.stakater.com/auto: "true"
namespace: homepage-dev
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: homepage
app.kubernetes.io/instance: homepage-dev
template:
metadata:
annotations:
checksum/configMaps: d1306b9af923c5b3f02566a43c7a141c7168ebf8a74e5ff1a2d5d8082001c1a1
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/name: homepage
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: HOMEPAGE_ALLOWED_HOSTS
value: home.alexlebens.dev
image: ghcr.io/gethomepage/homepage:v1.7.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config/bookmarks.yaml
mountPropagation: None
name: config
readOnly: true
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
mountPropagation: None
name: config
readOnly: true
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
mountPropagation: None
name: config
readOnly: true
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
mountPropagation: None
name: config
readOnly: true
subPath: services.yaml
- mountPath: /app/config/settings.yaml
mountPropagation: None
name: config
readOnly: true
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
mountPropagation: None
name: config
readOnly: true
subPath: widgets.yaml
volumes:
- configMap:
name: homepage-dev
name: config
---
# Source: homepage/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: homepage-dev-cloudflared-secret
namespace: homepage-dev
labels:
app.kubernetes.io/name: homepage-dev-cloudflared-secret
app.kubernetes.io/instance: homepage-dev
app.kubernetes.io/part-of: homepage-dev
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/homepage-dev
metadataPolicy: None
property: token

File diff suppressed because it is too large

@@ -1,129 +0,0 @@
---
# Source: huntarr/charts/huntarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: huntarr-config
labels:
app.kubernetes.io/instance: huntarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: huntarr
helm.sh/chart: huntarr-4.4.0
namespace: huntarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: huntarr/charts/huntarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: huntarr
labels:
app.kubernetes.io/instance: huntarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: huntarr
app.kubernetes.io/service: huntarr
helm.sh/chart: huntarr-4.4.0
namespace: huntarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9705
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: huntarr
app.kubernetes.io/name: huntarr
---
# Source: huntarr/charts/huntarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: huntarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: huntarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: huntarr
helm.sh/chart: huntarr-4.4.0
namespace: huntarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: huntarr
app.kubernetes.io/instance: huntarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: huntarr
app.kubernetes.io/name: huntarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/plexguide/huntarr:8.2.10
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: huntarr-config
---
# Source: huntarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-huntarr
namespace: huntarr
labels:
app.kubernetes.io/name: http-route-huntarr
app.kubernetes.io/instance: huntarr
app.kubernetes.io/part-of: huntarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- huntarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: huntarr
port: 80
weight: 100

File diff suppressed because it is too large

@@ -1,326 +0,0 @@
---
# Source: jellyfin/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: jellyfin-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: jellyfin/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: jellyfin-youtube-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-youtube-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadOnlyMany
nfs:
path: /volume2/Storage/YouTube
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellyfin-config
labels:
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
helm.sh/chart: jellyfin-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: jellyfin
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Gi"
storageClassName: "ceph-block"
---
# Source: jellyfin/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
volumeName: jellyfin-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: jellyfin/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-youtube-nfs-storage
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-youtube-nfs-storage
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
volumeName: jellyfin-youtube-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 1Gi
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: jellyfin
labels:
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
app.kubernetes.io/service: jellyfin
helm.sh/chart: jellyfin-4.4.0
namespace: jellyfin
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8096
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/name: jellyfin
---
# Source: jellyfin/charts/jellyfin/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyfin
helm.sh/chart: jellyfin-4.4.0
namespace: jellyfin
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: jellyfin
app.kubernetes.io/instance: jellyfin
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/name: jellyfin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: JELLYFIN_hostwebclient
value: "true"
- name: JELLYFIN_PublishedServerUrl
value: https://jellyfin.alexlebens.net/
image: ghcr.io/jellyfin/jellyfin:10.11.4
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 1
gpu.intel.com/i915: 1
memory: 2Gi
volumeMounts:
- mountPath: /cache
name: cache
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- mountPath: /mnt/youtube
name: youtube
readOnly: true
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: jellyfin-config
- name: media
persistentVolumeClaim:
claimName: jellyfin-nfs-storage
- name: youtube
persistentVolumeClaim:
claimName: jellyfin-youtube-nfs-storage
---
# Source: jellyfin/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellyfin-config-backup-secret
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-config-backup-secret
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellyfin/jellyfin-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
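# With mergePolicy: Merge, external-secrets keeps the keys fetched above and layers the
# templated RESTIC_REPOSITORY on top, so the resulting Secret carries both. A minimal
# sketch of the rendered Secret, with placeholder values only (not real credentials):
---
apiVersion: v1
kind: Secret
metadata:
  name: jellyfin-config-backup-secret
  namespace: jellyfin
type: Opaque
stringData:
  BUCKET_ENDPOINT: s3:https://example-endpoint/example-bucket     # placeholder
  RESTIC_REPOSITORY: s3:https://example-endpoint/example-bucket/jellyfin/jellyfin-config
  RESTIC_PASSWORD: <redacted>                                     # placeholder
  AWS_DEFAULT_REGION: us-east-1                                   # placeholder
  AWS_ACCESS_KEY_ID: <redacted>                                   # placeholder
  AWS_SECRET_ACCESS_KEY: <redacted>                               # placeholder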
---
# Source: jellyfin/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-jellyfin
namespace: jellyfin
labels:
app.kubernetes.io/name: http-route-jellyfin
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- jellyfin.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: jellyfin
port: 80
weight: 100
---
# Source: jellyfin/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: jellyfin-config-backup-source
namespace: jellyfin
labels:
app.kubernetes.io/name: jellyfin-config-backup-source
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
sourcePVC: jellyfin-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: jellyfin-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
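# A restore from this repository would typically pair the source above with a VolSync
# ReplicationDestination that reads the same secret. A minimal sketch, assuming a
# one-shot manual restore into the existing jellyfin-config PVC (the resource name and
# trigger value are illustrative, not part of this chart):
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: jellyfin-config-restore
  namespace: jellyfin
spec:
  trigger:
    manual: restore-once
  restic:
    repository: jellyfin-config-backup-secret
    destinationPVC: jellyfin-config
    copyMethod: Direct
    cacheCapacity: 10Gi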


@@ -1,861 +0,0 @@
---
# Source: jellystat/charts/jellystat/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellystat-data
labels:
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
helm.sh/chart: jellystat-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: jellystat
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: jellystat/charts/jellystat/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: jellystat
labels:
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
app.kubernetes.io/service: jellystat
helm.sh/chart: jellystat-4.4.0
namespace: jellystat
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/name: jellystat
---
# Source: jellystat/charts/jellystat/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellystat
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
helm.sh/chart: jellystat-4.4.0
namespace: jellystat
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: jellystat
app.kubernetes.io/instance: jellystat
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/name: jellystat
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: JWT_SECRET
valueFrom:
secretKeyRef:
key: secret-key
name: jellystat-secret
- name: JS_USER
valueFrom:
secretKeyRef:
key: user
name: jellystat-secret
- name: JS_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: jellystat-secret
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
key: username
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
key: dbname
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_IP
valueFrom:
secretKeyRef:
key: host
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_PORT
valueFrom:
secretKeyRef:
key: port
name: jellystat-postgresql-17-cluster-app
image: cyfershepard/jellystat:1.1.6
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /app/backend/backup-data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: jellystat-data
---
# Source: jellystat/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: jellystat-postgresql-17-cluster
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "jellystat-postgresql-17-external-backup"
serverName: "jellystat-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "jellystat-postgresql-17-garage-local-backup"
serverName: "jellystat-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-recovery"
serverName: jellystat-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: jellystat-postgresql-17-backup-1
externalClusters:
- name: jellystat-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "jellystat-postgresql-17-recovery"
serverName: jellystat-postgresql-17-backup-1
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: secret-key
- secretKey: user
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: user
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: password
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-data-backup-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-data-backup-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellystat/jellystat-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-postgresql-17-cluster-backup-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-postgresql-17-cluster-backup-secret-garage
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: jellystat/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-jellystat
namespace: jellystat
labels:
app.kubernetes.io/name: http-route-jellystat
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- jellystat.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: jellystat
port: 80
weight: 100
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-external-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-garage-local-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-recovery"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: jellystat/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: jellystat-postgresql-17-alert-rules
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/jellystat-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 1
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 2
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
---
# Source: jellystat/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: jellystat-data-backup-source
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-data-backup-source
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
sourcePVC: jellystat-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: jellystat-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "jellystat-postgresql-17-daily-backup-scheduled-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: jellystat-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-external-backup"
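# Note: CloudNativePG ScheduledBackup schedules use a six-field cron expression that
# includes seconds, so the "0 0 0 * * *" schedule above runs once a day at midnight.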
---
# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "jellystat-postgresql-17-live-backup-scheduled-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: jellystat-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-garage-local-backup"


@@ -1,712 +0,0 @@
---
# Source: karakeep/charts/meilisearch/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: false
---
# Source: karakeep/charts/meilisearch/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: karakeep-meilisearch-environment
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
data:
MEILI_ENV: "production"
MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE: "true"
MEILI_NO_ANALYTICS: "true"
MEILI_EXPERIMENTAL_ENABLE_METRICS: "true"
---
# Source: karakeep/charts/karakeep/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: karakeep
labels:
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
helm.sh/chart: karakeep-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: karakeep
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: karakeep/charts/meilisearch/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: karakeep/charts/karakeep/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: karakeep
labels:
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
app.kubernetes.io/service: karakeep
helm.sh/chart: karakeep-4.4.0
namespace: karakeep
spec:
type: ClusterIP
ports:
- port: 9222
targetPort: 9222
protocol: TCP
name: chrome
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: karakeep
---
# Source: karakeep/charts/meilisearch/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 7700
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
---
# Source: karakeep/charts/cloudflared/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karakeep-cloudflared
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-1.23.1
namespace: karakeep
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: cloudflared
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: karakeep-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: karakeep/charts/karakeep/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karakeep
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karakeep
helm.sh/chart: karakeep-4.4.0
namespace: karakeep
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: karakeep
app.kubernetes.io/name: karakeep
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- --no-sandbox
- --disable-gpu
- --disable-dev-shm-usage
- --remote-debugging-address=0.0.0.0
- --remote-debugging-port=9222
- --hide-scrollbars
image: gcr.io/zenika-hub/alpine-chrome:124
imagePullPolicy: IfNotPresent
name: chrome
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: DATA_DIR
value: /data
- name: DB_WAL_MODE
value: "true"
- name: NEXTAUTH_URL
value: https://karakeep.alexlebens.dev/
- name: NEXTAUTH_SECRET
valueFrom:
secretKeyRef:
key: key
name: karakeep-key-secret
- name: PROMETHEUS_AUTH_TOKEN
valueFrom:
secretKeyRef:
key: prometheus-token
name: karakeep-key-secret
- name: ASSET_STORE_S3_ENDPOINT
value: http://rook-ceph-rgw-ceph-objectstore.rook-ceph.svc:80
- name: ASSET_STORE_S3_REGION
value: us-east-1
- name: ASSET_STORE_S3_BUCKET
valueFrom:
configMapKeyRef:
key: BUCKET_NAME
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: ceph-bucket-karakeep
- name: ASSET_STORE_S3_FORCE_PATH_STYLE
value: "true"
- name: MEILI_ADDR
value: http://karakeep-meilisearch.karakeep:7700
- name: MEILI_MASTER_KEY
valueFrom:
secretKeyRef:
key: MEILI_MASTER_KEY
name: karakeep-meilisearch-master-key-secret
- name: BROWSER_WEB_URL
value: http://karakeep.karakeep:9222
- name: DISABLE_SIGNUPS
value: "false"
- name: OAUTH_PROVIDER_NAME
value: Authentik
- name: OAUTH_WELLKNOWN_URL
value: https://auth.alexlebens.dev/application/o/karakeep/.well-known/openid-configuration
- name: OAUTH_SCOPE
value: openid email profile
- name: OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
key: AUTHENTIK_CLIENT_ID
name: karakeep-oidc-secret
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: AUTHENTIK_CLIENT_SECRET
name: karakeep-oidc-secret
- name: OLLAMA_BASE_URL
value: http://ollama-server-3.ollama:11434
- name: OLLAMA_KEEP_ALIVE
value: 5m
- name: INFERENCE_TEXT_MODEL
value: gemma3:4b
- name: INFERENCE_IMAGE_MODEL
value: granite3.2-vision:2b
- name: EMBEDDING_TEXT_MODEL
value: mxbai-embed-large
- name: INFERENCE_JOB_TIMEOUT_SEC
value: "720"
image: ghcr.io/karakeep-app/karakeep:0.29.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: karakeep
---
# Source: karakeep/charts/meilisearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: karakeep-meilisearch
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
serviceName: karakeep-meilisearch
selector:
matchLabels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
template:
metadata:
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
annotations:
checksum/config: 574eb89dc1c0d091db8dc897d730588383fe85ac2b886588fe96f50eb3d1d7c2
spec:
serviceAccountName: karakeep-meilisearch
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
volumes:
- name: tmp
emptyDir: {}
- name: data
persistentVolumeClaim:
claimName: karakeep-meilisearch
containers:
- name: meilisearch
image: "getmeili/meilisearch:v1.18.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- name: tmp
mountPath: /tmp
- name: data
mountPath: /meili_data
envFrom:
- configMapRef:
name: karakeep-meilisearch-environment
- secretRef:
name: karakeep-meilisearch-master-key-secret
ports:
- name: http
containerPort: 7700
protocol: TCP
startupProbe:
httpGet:
path: /health
port: http
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 60
timeoutSeconds: 1
livenessProbe:
httpGet:
path: /health
port: http
periodSeconds: 10
initialDelaySeconds: 0
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /health
port: http
periodSeconds: 10
initialDelaySeconds: 0
timeoutSeconds: 10
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-key-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-key-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/key
metadataPolicy: None
property: key
- secretKey: prometheus-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/key
metadataPolicy: None
property: prometheus-token
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-oidc-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-oidc-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AUTHENTIK_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/karakeep
metadataPolicy: None
property: client
- secretKey: AUTHENTIK_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/karakeep
metadataPolicy: None
property: secret
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-meilisearch-master-key-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-meilisearch-master-key-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: MEILI_MASTER_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/karakeep/meilisearch
metadataPolicy: None
property: MEILI_MASTER_KEY
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-cloudflared-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-cloudflared-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/karakeep
metadataPolicy: None
property: token
---
# Source: karakeep/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: karakeep-data-backup-secret
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-data-backup-secret
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/karakeep/karakeep-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: karakeep/templates/object-bucket-claim.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-bucket-karakeep
labels:
app.kubernetes.io/name: ceph-bucket-karakeep
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
generateBucketName: bucket-karakeep
storageClassName: ceph-bucket
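# Rook's bucket provisioner answers this claim with a ConfigMap and a Secret sharing
# the claim's name, which is what the karakeep Deployment's BUCKET_NAME and AWS_*
# references above point at. A minimal sketch of the generated objects; all values
# below are placeholders (the bucket name and credentials are generated at claim time):
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-bucket-karakeep
  namespace: karakeep
data:
  BUCKET_NAME: bucket-karakeep-example                        # placeholder, generated
  BUCKET_HOST: rook-ceph-rgw-ceph-objectstore.rook-ceph.svc   # assumed from the endpoint above
  BUCKET_PORT: "80"
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-bucket-karakeep
  namespace: karakeep
type: Opaque
stringData:
  AWS_ACCESS_KEY_ID: <redacted>        # placeholder
  AWS_SECRET_ACCESS_KEY: <redacted>    # placeholder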
---
# Source: karakeep/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: karakeep-data-backup-source
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep-data-backup-source
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
sourcePVC: karakeep-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: karakeep-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: karakeep/charts/meilisearch/templates/serviceMonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: karakeep-meilisearch
namespace: karakeep
labels:
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/version: "v1.18.0"
app.kubernetes.io/component: search-engine
app.kubernetes.io/part-of: meilisearch
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: karakeep
namespaceSelector:
matchNames:
- karakeep
selector:
matchLabels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
endpoints:
- port: http
path: /metrics
interval: 1m
scrapeTimeout: 10s
bearerTokenSecret:
name: karakeep-meilisearch-master-key-secret
key: MEILI_MASTER_KEY
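# Note: Meilisearch only serves /metrics when MEILI_EXPERIMENTAL_ENABLE_METRICS is set
# (see the environment ConfigMap above), and the endpoint accepts the master key as a
# bearer token, which is why this ServiceMonitor reuses the master-key secret.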
---
# Source: karakeep/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: karakeep
namespace: karakeep
labels:
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /api/metrics
authorization:
credentials:
key: prometheus-token
name: karakeep-key-secret
selector:
matchLabels:
app.kubernetes.io/name: karakeep
app.kubernetes.io/instance: karakeep
---
# Source: karakeep/charts/meilisearch/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: karakeep-meilisearch-test-connection
labels:
app.kubernetes.io/name: meilisearch
helm.sh/chart: meilisearch-0.17.2
app.kubernetes.io/instance: karakeep
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['karakeep-meilisearch:7700']
restartPolicy: Never


@@ -1,157 +0,0 @@
---
# Source: kiwix/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: kiwix-nfs-storage
namespace: kiwix
labels:
app.kubernetes.io/name: kiwix-nfs-storage
app.kubernetes.io/instance: kiwix
app.kubernetes.io/part-of: kiwix
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Kiwix
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: kiwix/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kiwix-nfs-storage
namespace: kiwix
labels:
app.kubernetes.io/name: kiwix-nfs-storage
app.kubernetes.io/instance: kiwix
app.kubernetes.io/part-of: kiwix
spec:
volumeName: kiwix-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: kiwix/charts/kiwix/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: kiwix
labels:
app.kubernetes.io/instance: kiwix
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kiwix
app.kubernetes.io/service: kiwix
helm.sh/chart: kiwix-4.4.0
namespace: kiwix
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kiwix
app.kubernetes.io/name: kiwix
---
# Source: kiwix/charts/kiwix/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kiwix
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kiwix
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kiwix
helm.sh/chart: kiwix-4.4.0
namespace: kiwix
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: kiwix
app.kubernetes.io/instance: kiwix
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kiwix
app.kubernetes.io/name: kiwix
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- '*.zim'
env:
- name: PORT
value: "8080"
image: ghcr.io/kiwix/kiwix-serve:3.8.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
volumeMounts:
- mountPath: /data
name: media
readOnly: true
volumes:
- name: media
persistentVolumeClaim:
claimName: kiwix-nfs-storage
---
# Source: kiwix/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-kiwix
namespace: kiwix
labels:
app.kubernetes.io/name: http-route-kiwix
app.kubernetes.io/instance: kiwix
app.kubernetes.io/part-of: kiwix
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- kiwix.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: kiwix
port: 80
weight: 100


@@ -1,945 +0,0 @@
---
# Source: komodo/charts/komodo/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: komodo-cache
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: komodo
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: komodo/charts/komodo/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: komodo-syncs
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: komodo
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: komodo-ferretdb-2
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
app.kubernetes.io/service: komodo-ferretdb-2
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
type: ClusterIP
ports:
- port: 27017
targetPort: 27017
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: komodo-main
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
app.kubernetes.io/service: komodo-main
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9120
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
---
# Source: komodo/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: komodo-periphery-ps10rp
namespace: komodo
labels:
app.kubernetes.io/name: komodo-periphery-ps10rp
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
annotations:
tailscale.com/tailnet-fqdn: komodo-periphery-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: komodo-ferretdb-2
labels:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/name: komodo
app.kubernetes.io/instance: komodo
template:
metadata:
labels:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: FERRETDB_POSTGRESQL_URL
valueFrom:
secretKeyRef:
key: uri
name: komodo-postgresql-17-fdb-cluster-app
image: ghcr.io/ferretdb/ferretdb:2.7.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: komodo-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: komodo
app.kubernetes.io/instance: komodo
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: COMPOSE_LOGGING_DRIVER
value: local
- name: KOMODO_HOST
value: https://komodo.alexlebens.net
- name: KOMODO_TITLE
value: Komodo
- name: PASSKEY
valueFrom:
secretKeyRef:
key: passkey
name: komodo-secret
- name: KOMODO_MONITORING_INTERVAL
value: 15-sec
- name: KOMODO_RESOURCE_POLL_INTERVAL
value: 5-min
- name: KOMODO_PASSKEY
valueFrom:
secretKeyRef:
key: passkey
name: komodo-secret
- name: KOMODO_WEBHOOK_SECRET
valueFrom:
secretKeyRef:
key: webhook
name: komodo-secret
- name: KOMODO_JWT_SECRET
valueFrom:
secretKeyRef:
key: jwt
name: komodo-secret
- name: KOMODO_LOCAL_AUTH
value: "true"
- name: KOMODO_ENABLE_NEW_USERS
value: "true"
- name: KOMODO_DISABLE_NON_ADMIN_CREATE
value: "true"
- name: KOMODO_TRANSPARENT_MODE
value: "false"
- name: PERIPHERY_SSL_ENABLED
value: "false"
- name: DB_USERNAME
valueFrom:
secretKeyRef:
key: user
name: komodo-postgresql-17-fdb-cluster-app
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: komodo-postgresql-17-fdb-cluster-app
- name: KOMODO_DATABASE_URI
value: mongodb://$(DB_USERNAME):$(DB_PASSWORD)@komodo-ferretdb-2.komodo:27017/komodo
- name: KOMODO_OIDC_ENABLED
value: "true"
- name: KOMODO_OIDC_PROVIDER
value: http://authentik-server.authentik/application/o/komodo/
- name: KOMODO_OIDC_REDIRECT_HOST
value: https://authentik.alexlebens.net
- name: KOMODO_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
key: oidc-client-id
name: komodo-secret
- name: KOMODO_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: oidc-client-secret
name: komodo-secret
- name: KOMODO_OIDC_USE_FULL_EMAIL
value: "true"
image: ghcr.io/moghtech/komodo-core:1.19.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /repo-cache
name: cache
- mountPath: /syncs
name: syncs
volumes:
- name: cache
persistentVolumeClaim:
claimName: komodo-cache
- name: syncs
persistentVolumeClaim:
claimName: komodo-syncs
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: komodo-postgresql-17-fdb-cluster
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/ferretdb/postgres-documentdb:17-0.106.0-ferretdb-2.5.0"
imagePullPolicy: IfNotPresent
postgresUID: 999
postgresGID: 999
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-external-backup"
serverName: "komodo-postgresql-17-fdb-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-garage-local-backup"
serverName: "komodo-postgresql-17-fdb-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-recovery"
serverName: komodo-postgresql-17-fdb-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: true
enablePDB: true
postgresql:
shared_preload_libraries:
- pg_cron
- pg_documentdb_core
- pg_documentdb
pg_hba:
- host ferretDB postgres localhost trust
- host ferretDB ferret localhost trust
parameters:
cron.database_name: ferretDB
documentdb.enableBypassDocumentValidation: "true"
documentdb.enableCompact: "true"
documentdb.enableLetAndCollationForQueryMatch: "true"
documentdb.enableNowSystemVariable: "true"
documentdb.enableSchemaValidation: "true"
documentdb.enableSortbyIdPushDownToPrimaryKey: "true"
documentdb.enableUserCrud: "true"
documentdb.maxUserLimit: "100"
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
initdb:
database: ferretDB
owner: ferret
postInitApplicationSQL:
- create extension if not exists pg_cron;
- create extension if not exists documentdb cascade;
- grant documentdb_admin_role to ferret;
---
# Source: komodo/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-secret
namespace: komodo
labels:
app.kubernetes.io/name: komodo-secret
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: passkey
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
metadataPolicy: None
property: passkey
- secretKey: jwt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
metadataPolicy: None
property: jwt
- secretKey: webhook
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
metadataPolicy: None
property: webhook
- secretKey: oidc-client-id
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/komodo
metadataPolicy: None
property: client
- secretKey: oidc-client-secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/komodo
metadataPolicy: None
property: secret
---
# Source: komodo/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-postgresql-17-fdb-cluster-backup-secret
namespace: komodo
labels:
app.kubernetes.io/name: komodo-postgresql-17-fdb-cluster-backup-secret
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: komodo/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-postgresql-17-cluster-backup-secret-garage
namespace: komodo
labels:
app.kubernetes.io/name: komodo-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: komodo/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: https-route-komodo
namespace: komodo
labels:
app.kubernetes.io/name: https-route-komodo
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- komodo.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: komodo-main
port: 80
weight: 100
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "komodo-postgresql-17-fdb-external-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/komodo/komodo-postgresql-17-fdb-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: komodo-postgresql-17-fdb-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: komodo-postgresql-17-fdb-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "komodo-postgresql-17-fdb-garage-local-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/komodo/komodo-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "komodo-postgresql-17-fdb-recovery"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/komodo/komodo-postgresql-17-fdb-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: komodo-postgresql-17-fdb-alert-rules
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/komodo-postgresql-17-fdb
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
          summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
            CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
            risk of data loss and downtime if the primary instance fails.
            The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
            will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
            This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
            instances. The replaced instance may need some time to catch up with the cluster primary instance.
            This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
            case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="komodo"} - cnpg_pg_replication_is_wal_receiver_up{namespace="komodo"}) < 1
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHAWarning
annotations:
          summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch-up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="komodo"} - cnpg_pg_replication_is_wal_receiver_up{namespace="komodo"}) < 2
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="komodo",pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
          summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="komodo",pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
          summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
          summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
          summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "komodo-postgresql-17-fdb-daily-backup-scheduled-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: komodo-postgresql-17-fdb-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-external-backup"
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "komodo-postgresql-17-fdb-live-backup-scheduled-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.1
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: komodo-postgresql-17-fdb-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-garage-local-backup"


@@ -1,228 +0,0 @@
---
# Source: kronic/charts/kronic/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: kronic
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
---
# Source: kronic/charts/kronic/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
name: kronic
rules:
- apiGroups:
- ""
resources:
- pods
- events
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
- cronjobs
- cronjobs/status
verbs:
- "*"
---
# Source: kronic/charts/kronic/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
name: kronic
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kronic
subjects:
- kind: ServiceAccount
name: kronic
namespace: "kronic"
---
# Source: kronic/charts/kronic/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: kronic
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
---
# Source: kronic/charts/kronic/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kronic
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
template:
metadata:
labels:
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
spec:
serviceAccountName: kronic
securityContext:
{}
containers:
- name: kronic
securityContext:
{}
image: "ghcr.io/mshade/kronic:v0.1.4"
imagePullPolicy: IfNotPresent
env:
- name: KRONIC_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KRONIC_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: kronic-config-secret
key: password
- name: KRONIC_ADMIN_USERNAME
value: "kronic"
- name: KRONIC_ALLOW_NAMESPACES
value: "gitea,vault,talos,libation,kubernetes-cloudflare-ddns"
- name: KRONIC_NAMESPACE_ONLY
value: ""
ports:
- name: http
containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: http
readinessProbe:
httpGet:
path: /healthz
port: http
resources:
limits:
cpu: 1
memory: 1024Mi
requests:
cpu: 10m
memory: 256Mi
---
# Source: kronic/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kronic-config-secret
namespace: kronic
labels:
app.kubernetes.io/name: kronic-config-secret
app.kubernetes.io/instance: kronic
app.kubernetes.io/part-of: kronic
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/kronic/auth
metadataPolicy: None
property: password
---
# Source: kronic/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: https-route-kronic
namespace: kronic
labels:
app.kubernetes.io/name: https-route-kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/part-of: kronic
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- kronic.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: kronic
port: 80
weight: 100
---
# Source: kronic/charts/kronic/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "kronic-test-connection"
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['kronic:80/healthz']
restartPolicy: Never


@@ -1,254 +0,0 @@
---
# Source: kubelet-serving-cert-approver/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: kubelet-serving-cert-approver
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/part-of: kubelet-serving-cert-approver
pod-security.kubernetes.io/audit: restricted
pod-security.kubernetes.io/enforce: restricted
pod-security.kubernetes.io/warn: restricted
---
# Source: kubelet-serving-cert-approver/charts/kubelet-serving-cert-approver/templates/common.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kubelet-serving-cert-approver
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubelet-serving-cert-approver
helm.sh/chart: kubelet-serving-cert-approver-4.4.0
namespace: kubelet-serving-cert-approver
secrets:
- name: kubelet-serving-cert-approver-kubelet-serving-cert-approver-sa-token
---
# Source: kubelet-serving-cert-approver/charts/kubelet-serving-cert-approver/templates/common.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: kubelet-serving-cert-approver-kubelet-serving-cert-approver-sa-token
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubelet-serving-cert-approver
helm.sh/chart: kubelet-serving-cert-approver-4.4.0
annotations:
kubernetes.io/service-account.name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
# Source: kubelet-serving-cert-approver/templates/cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "certificates-kubelet-serving-cert-approver"
namespace: kubelet-serving-cert-approver
labels:
app.kubernetes.io/name: "certificates-kubelet-serving-cert-approver"
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/part-of: kubelet-serving-cert-approver
rules:
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- get
- list
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/approval
verbs:
- update
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/kubelet-serving
resources:
- signers
verbs:
- approve
---
# Source: kubelet-serving-cert-approver/templates/cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "events-kubelet-serving-cert-approver"
namespace: kubelet-serving-cert-approver
labels:
app.kubernetes.io/name: "events-kubelet-serving-cert-approver"
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/part-of: kubelet-serving-cert-approver
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: kubelet-serving-cert-approver/templates/cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/part-of: kubelet-serving-cert-approver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "certificates-kubelet-serving-cert-approver"
subjects:
- kind: ServiceAccount
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
# Source: kubelet-serving-cert-approver/templates/role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "events-kubelet-serving-cert-approver"
namespace: kubelet-serving-cert-approver
labels:
app.kubernetes.io/name: "events-kubelet-serving-cert-approver"
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/part-of: kubelet-serving-cert-approver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "events-kubelet-serving-cert-approver"
subjects:
- kind: ServiceAccount
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
# Source: kubelet-serving-cert-approver/charts/kubelet-serving-cert-approver/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: kubelet-serving-cert-approver
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/service: kubelet-serving-cert-approver
helm.sh/chart: kubelet-serving-cert-approver-4.4.0
namespace: kubelet-serving-cert-approver
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: health
- port: 9090
targetPort: 9090
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
---
# Source: kubelet-serving-cert-approver/charts/kubelet-serving-cert-approver/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubelet-serving-cert-approver
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubelet-serving-cert-approver
helm.sh/chart: kubelet-serving-cert-approver-4.4.0
namespace: kubelet-serving-cert-approver
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: kubelet-serving-cert-approver
template:
metadata:
annotations:
checksum/secrets: 591a33eca0bc5c4a8475d0538f3f4840841582c86a3ac2c97147b2b00e5774c5
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
spec:
enableServiceLinks: false
serviceAccountName: kubelet-serving-cert-approver
automountServiceAccountToken: true
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: DoesNotExist
- key: node-role.kubernetes.io/control-plane
operator: DoesNotExist
weight: 100
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- serve
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: ghcr.io/alex1989hu/kubelet-serving-cert-approver:0.10.0
imagePullPolicy: Always
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true


@@ -1,96 +0,0 @@
---
# Source: kubernetes-cloudflare-ddns/charts/kubernetes-cloudflare-ddns/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: kubernetes-cloudflare-ddns
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubernetes-cloudflare-ddns
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-cloudflare-ddns
helm.sh/chart: kubernetes-cloudflare-ddns-4.4.0
namespace: kubernetes-cloudflare-ddns
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "30 4 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubernetes-cloudflare-ddns
app.kubernetes.io/name: kubernetes-cloudflare-ddns
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
containers:
- envFrom:
- secretRef:
name: kubernetes-cloudflare-ddns-secret
image: kubitodev/kubernetes-cloudflare-ddns:2.0.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: kubernetes-cloudflare-ddns/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kubernetes-cloudflare-ddns-secret
namespace: kubernetes-cloudflare-ddns
labels:
app.kubernetes.io/name: kubernetes-cloudflare-ddns-secret
app.kubernetes.io/instance: kubernetes-cloudflare-ddns
app.kubernetes.io/part-of: kubernetes-cloudflare-ddns
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AUTH_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: token
- secretKey: NAME
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: name
- secretKey: RECORD_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: record-id
- secretKey: ZONE_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: zone-id


@@ -1,129 +0,0 @@
---
# Source: libation/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: libation-nfs-storage
namespace: libation
labels:
app.kubernetes.io/name: libation-nfs-storage
app.kubernetes.io/instance: libation
app.kubernetes.io/part-of: libation
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Audiobooks/
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: libation/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: libation-config
namespace: libation
labels:
app.kubernetes.io/name: libation-config
app.kubernetes.io/instance: libation
app.kubernetes.io/part-of: libation
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
volumeMode: Filesystem
---
# Source: libation/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: libation-nfs-storage
namespace: libation
labels:
app.kubernetes.io/name: libation-nfs-storage
app.kubernetes.io/instance: libation
app.kubernetes.io/part-of: libation
spec:
volumeName: libation-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: libation/charts/libation/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: libation
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: libation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: libation
helm.sh/chart: libation-4.4.0
namespace: libation
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "30 4 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: libation
app.kubernetes.io/name: libation
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
containers:
- env:
- name: SLEEP_TIME
value: "-1"
- name: LIBATION_BOOKS_DIR
value: /data
image: rmcrackan/libation:12.8.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /data
name: data
volumes:
- name: config
persistentVolumeClaim:
claimName: libation-config
- name: data
persistentVolumeClaim:
claimName: libation-nfs-storage


@@ -1,928 +0,0 @@
---
# Source: lidarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidarr/charts/lidarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidarr-config
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: lidarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidarr-nfs-storage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-nfs-storage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
volumeName: lidarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidarr/charts/lidarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: lidarr
labels:
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
app.kubernetes.io/service: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8686
protocol: TCP
name: http
- port: 9792
targetPort: 9792
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
---
# Source: lidarr/charts/lidarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidarr
helm.sh/chart: lidarr-4.4.0
namespace: lidarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidarr
app.kubernetes.io/name: lidarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/lidarr:2.14.5@sha256:5e1235d00b5d1c1f60ca0d472e554a6611aef41aa7b5b6d88260214bf4809af0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- lidarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9792"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: lidarr-config
- name: media
persistentVolumeClaim:
claimName: lidarr-nfs-storage
---
# Source: lidarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: lidarr2-postgresql-17-cluster
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
serverName: "lidarr2-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
serverName: "lidarr2-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: lidarr2-postgresql-17-backup-1
externalClusters:
- name: lidarr2-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "lidarr2-postgresql-17-recovery"
serverName: lidarr2-postgresql-17-backup-1
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-config-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/lidarr2/lidarr2-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: lidarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidarr-postgresql-17-cluster-backup-secret-garage
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: lidarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: http-route-lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidarr
port: 80
weight: 100
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-external-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/lidarr2/lidarr2-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-garage-local-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: lidarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: lidarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "lidarr2-postgresql-17-recovery"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/lidarr/lidarr2-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: lidarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: lidarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr2-postgresql-17-alert-rules
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/lidarr2-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
        summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 1
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
          This can happen during a normal failover or automated minor version upgrades. The replaced instance may
          need some time to catch up with the cluster primary instance.
          This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="lidarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="lidarr"}) < 2
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
        summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="lidarr", persistentvolumeclaim=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="lidarr",pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
        summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
          Over 300,000,000 transactions from the frozen XID
          on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
        summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
        summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "lidarr/lidarr2-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="lidarr", pod=~"lidarr2-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: lidarr
cnpg_cluster: lidarr2-postgresql-17-cluster
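# Note on the connection alerts above (illustrative arithmetic, not part of the rendered chart):
# each expression divides the per-pod backend count by the max_connections value reported by that
# instance and multiplies by 100. Assuming the stock PostgreSQL default of max_connections=100,
# the warning fires above 80 open backends and the critical alert above 95, sustained for 5 minutes.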
---
# Source: lidarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
groups:
- name: lidarr
rules:
- alert: ExportarrAbsent
annotations:
description: Lidarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*lidarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: LidarrDown
annotations:
description: Lidarr service is down.
summary: Lidarr is down.
expr: |
lidarr_system_status{job=~".*lidarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: lidarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: lidarr-config-backup-source
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr-config-backup-source
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
sourcePVC: lidarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: lidarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
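# Restore sketch (not rendered by the chart): recovering lidarr-config from this restic repository
# would typically use a VolSync ReplicationDestination pointed at the same repository Secret.
# The resource name below is hypothetical and the field names follow the VolSync restic mover API;
# verify them against the deployed VolSync version before use:
#
#   apiVersion: volsync.backube/v1alpha1
#   kind: ReplicationDestination
#   metadata:
#     name: lidarr-config-restore          # hypothetical name
#     namespace: lidarr
#   spec:
#     trigger:
#       manual: restore-once               # run a single restore when triggered
#     restic:
#       repository: lidarr-config-backup-secret
#       destinationPVC: lidarr-config      # restore into the existing PVC
#       copyMethod: Direct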
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-daily-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-external-backup"
---
# Source: lidarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "lidarr2-postgresql-17-live-backup-scheduled-backup"
namespace: lidarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: lidarr2-postgresql-17
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: lidarr2-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "lidarr2-postgresql-17-garage-local-backup"
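# Note: CloudNativePG ScheduledBackup schedules use a six-field cron expression (seconds first),
# so "0 0 0 * * *" above runs once a day at 00:00:00. As an illustrative (not rendered) example,
# a schedule of "0 30 3 * * 6" would run at 03:30:00 on Saturdays.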
---
# Source: lidarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: lidarr
namespace: lidarr
labels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
app.kubernetes.io/part-of: lidarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: lidarr
app.kubernetes.io/instance: lidarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics


@@ -1,221 +0,0 @@
---
# Source: lidatube/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidatube-nfs-storage
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-nfs-storage
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Music
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidatube/charts/lidatube/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidatube-config
labels:
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
helm.sh/chart: lidatube-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidatube
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: lidatube/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidatube-nfs-storage
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-nfs-storage
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
volumeName: lidatube-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidatube/charts/lidatube/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: lidatube
labels:
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
app.kubernetes.io/service: lidatube
helm.sh/chart: lidatube-4.4.0
namespace: lidatube
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/name: lidatube
---
# Source: lidatube/charts/lidatube/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidatube
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
helm.sh/chart: lidatube-4.4.0
namespace: lidatube
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidatube
app.kubernetes.io/instance: lidatube
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/name: lidatube
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: lidarr_address
value: http://lidarr.lidarr:80
- name: lidarr_api_key
valueFrom:
secretKeyRef:
key: lidarr_api_key
name: lidatube-secret
- name: sleep_interval
value: "360"
- name: sync_schedule
value: "4"
- name: attempt_lidarr_import
value: "true"
image: thewicklowwolf/lidatube:0.2.41
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /lidatube/config
name: config
- mountPath: /lidatube/downloads
name: music
volumes:
- name: config
persistentVolumeClaim:
claimName: lidatube-config
- name: music
persistentVolumeClaim:
claimName: lidatube-nfs-storage
---
# Source: lidatube/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidatube-secret
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-secret
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: lidarr_api_key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/lidarr2/key
metadataPolicy: None
property: key
---
# Source: lidatube/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidatube
namespace: lidatube
labels:
app.kubernetes.io/name: http-route-lidatube
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidatube.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidatube
port: 80
weight: 100


@@ -1,180 +0,0 @@
---
# Source: listenarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: listenarr-nfs-storage
namespace: listenarr
labels:
app.kubernetes.io/name: listenarr-nfs-storage
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Audiobooks
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: listenarr/charts/listenarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: listenarr
labels:
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
helm.sh/chart: listenarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: listenarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: listenarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: listenarr-nfs-storage
namespace: listenarr
labels:
app.kubernetes.io/name: listenarr-nfs-storage
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
volumeName: listenarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: listenarr/charts/listenarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: listenarr
labels:
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
app.kubernetes.io/service: listenarr
helm.sh/chart: listenarr-4.4.0
namespace: listenarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/name: listenarr
---
# Source: listenarr/charts/listenarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: listenarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
helm.sh/chart: listenarr-4.4.0
namespace: listenarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: listenarr
app.kubernetes.io/instance: listenarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/name: listenarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: LISTENARR_PUBLIC_URL
value: https://listenarr.alexlebens.net
image: therobbiedavis/listenarr:canary-0.2.35
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /app/config
name: config
- mountPath: /data
name: media
volumes:
- name: config
persistentVolumeClaim:
claimName: listenarr
- name: media
persistentVolumeClaim:
claimName: listenarr-nfs-storage
---
# Source: listenarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-listenarr
namespace: listenarr
labels:
app.kubernetes.io/name: http-route-listenarr
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- listenarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: listenarr
port: 80
weight: 100


@@ -1,270 +0,0 @@
---
# Source: local-path-provisioner/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/part-of: local-path-provisioner
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
imagePullSecrets:
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: local-path-config
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
data:
config.json: |-
{
"nodePathMap": [
{
"node": "talos-2di-ktg",
"paths": [
"/var/local-path-provisioner"
]
},
{
"node": "talos-9vs-6hh",
"paths": [
"/var/local-path-provisioner"
]
},
{
"node": "talos-aoq-hpv",
"paths": [
"/var/local-path-provisioner"
]
}
]
}
setup: |-
#!/bin/sh
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
set -eu
rm -rf "$VOL_DIR"
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
namespace: local-path-provisioner
spec:
priorityClassName: system-node-critical
tolerations:
- key: node.kubernetes.io/disk-pressure
operator: Exists
effect: NoSchedule
containers:
- name: helper-pod
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
resources:
{}
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
annotations:
storageclass.kubernetes.io/is-default-class: "false"
defaultVolumeType: "hostPath"
provisioner: cluster.local/local-path-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
allowVolumeExpansion: true
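# Usage sketch (not part of the chart output): a PVC only needs to reference the class by name,
# and because volumeBindingMode is WaitForFirstConsumer it stays Pending until a pod using it is
# scheduled onto one of the configured nodes. The claim name and size below are placeholders:
#
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: example-local-path-claim
#   spec:
#     accessModes:
#       - ReadWriteOnce
#     storageClassName: local-path
#     resources:
#       requests:
#         storage: 1Gi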
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims", "configmaps", "pods", "pods/log"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner
subjects:
- kind: ServiceAccount
name: local-path-provisioner
namespace: local-path-provisioner
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: local-path-provisioner
subjects:
- kind: ServiceAccount
name: local-path-provisioner
namespace: local-path-provisioner
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: local-path-provisioner
app.kubernetes.io/instance: local-path-provisioner
template:
metadata:
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: local-path-provisioner
securityContext:
{}
containers:
- name: local-path-provisioner
securityContext:
{}
image: "rancher/local-path-provisioner:v0.0.32"
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
- --service-account-name
- local-path-provisioner
- --provisioner-name
- cluster.local/local-path-provisioner
- --helper-image
- "busybox:1.37.0"
- --configmap-name
- local-path-config
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONFIG_MOUNT_PATH
value: /etc/config/
resources:
{}
volumes:
- name: config-volume
configMap:
name: local-path-config
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- talos-2di-ktg
- talos-9vs-6hh
- talos-aoq-hpv

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,285 +0,0 @@
---
# Source: metrics-server/charts/metrics-server/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
---
# Source: metrics-server/charts/metrics-server/templates/clusterrole-aggregated-reader.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server-aggregated-reader
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
# Source: metrics-server/charts/metrics-server/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
# Source: metrics-server/charts/metrics-server/templates/clusterrolebinding-auth-delegator.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
# Source: metrics-server/charts/metrics-server/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
# Source: metrics-server/charts/metrics-server/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
# Source: metrics-server/charts/metrics-server/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
---
# Source: metrics-server/charts/metrics-server/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server
namespace: kube-system
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
selector:
matchLabels:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
template:
metadata:
labels:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
spec:
serviceAccountName: metrics-server
priorityClassName: "system-cluster-critical"
containers:
- name: metrics-server
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
image: registry.k8s.io/metrics-server/metrics-server:v0.8.0
imagePullPolicy: IfNotPresent
args:
- --secure-port=10250
- --cert-dir=/tmp
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
- --authorization-always-allow-paths=/metrics
ports:
- name: https
protocol: TCP
containerPort: 10250
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
initialDelaySeconds: 0
periodSeconds: 10
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
volumeMounts:
- name: tmp
mountPath: /tmp
resources:
requests:
cpu: 100m
memory: 200Mi
volumes:
- name: tmp
emptyDir: {}
---
# Source: metrics-server/charts/metrics-server/templates/apiservice.yaml
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
port: 443
version: v1beta1
versionPriority: 100
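# Note: this APIService registers metrics.k8s.io/v1beta1 against the metrics-server Service in
# kube-system, which is what `kubectl top nodes` and `kubectl top pods` read from.
# insecureSkipTLSVerify: true lets the API server skip verifying metrics-server's self-signed
# serving certificate, and --kubelet-insecure-tls in the Deployment above does the same for
# kubelet scrapes; both trade TLS verification for simplicity on clusters without managed certs.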
---
# Source: metrics-server/charts/metrics-server/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: metrics-server
namespace: kube-system
labels:
helm.sh/chart: metrics-server-3.13.0
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: app.kubernetes.io/instance
namespaceSelector:
matchNames:
- kube-system
selector:
matchLabels:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: metrics-server
endpoints:
- port: https
path: /metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
interval: 1m
scrapeTimeout: 10s

File diff suppressed because it is too large

@@ -1,211 +0,0 @@
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: nfs-nfs-subdir-external-provisioner
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: nfs-client
annotations:
provisioner: cluster.local/nfs-nfs-subdir-external-provisioner
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
archiveOnDelete: "true"
mountOptions:
- hard
- vers=4
- minorversion=1
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/persistentvolume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-nfs-nfs-subdir-external-provisioner
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
nfs-subdir-external-provisioner: nfs-nfs-subdir-external-provisioner
spec:
capacity:
storage: 10Mi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ""
mountOptions:
- hard
- vers=4
- minorversion=1
nfs:
server: 10.232.1.64
path: /volume2/Talos
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/persistentvolumeclaim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pvc-nfs-nfs-subdir-external-provisioner
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
spec:
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
storageClassName: ""
selector:
matchLabels:
nfs-subdir-external-provisioner: nfs-nfs-subdir-external-provisioner
resources:
requests:
storage: 10Mi
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: nfs-nfs-subdir-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: run-nfs-nfs-subdir-external-provisioner
subjects:
- kind: ServiceAccount
name: nfs-nfs-subdir-external-provisioner
namespace: nfs
roleRef:
kind: ClusterRole
name: nfs-nfs-subdir-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/role.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: leader-locking-nfs-nfs-subdir-external-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/rolebinding.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: leader-locking-nfs-nfs-subdir-external-provisioner
subjects:
- kind: ServiceAccount
name: nfs-nfs-subdir-external-provisioner
namespace: nfs
roleRef:
kind: Role
name: leader-locking-nfs-nfs-subdir-external-provisioner
apiGroup: rbac.authorization.k8s.io
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-nfs-subdir-external-provisioner
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-subdir-external-provisioner
release: nfs
template:
metadata:
annotations:
labels:
app: nfs-subdir-external-provisioner
release: nfs
spec:
serviceAccountName: nfs-nfs-subdir-external-provisioner
securityContext:
{}
containers:
- name: nfs-subdir-external-provisioner
image: "registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2"
imagePullPolicy: IfNotPresent
securityContext:
{}
volumeMounts:
- name: nfs-subdir-external-provisioner-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cluster.local/nfs-nfs-subdir-external-provisioner
- name: NFS_SERVER
value: 10.232.1.64
- name: NFS_PATH
value: /volume2/Talos
volumes:
- name: nfs-subdir-external-provisioner-root
persistentVolumeClaim:
claimName: pvc-nfs-nfs-subdir-external-provisioner


@@ -1,195 +0,0 @@
---
# Source: ntfy/charts/ntfy/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ntfy
labels:
app.kubernetes.io/instance: ntfy
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ntfy
helm.sh/chart: ntfy-4.4.0
namespace: ntfy
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: ntfy/charts/ntfy/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: ntfy
labels:
app.kubernetes.io/instance: ntfy
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ntfy
app.kubernetes.io/service: ntfy
helm.sh/chart: ntfy-4.4.0
namespace: ntfy
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
- port: 9090
targetPort: 9090
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ntfy
app.kubernetes.io/name: ntfy
---
# Source: ntfy/charts/ntfy/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ntfy
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ntfy
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ntfy
helm.sh/chart: ntfy-4.4.0
namespace: ntfy
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: ntfy
app.kubernetes.io/instance: ntfy
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ntfy
app.kubernetes.io/name: ntfy
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- serve
env:
- name: TZ
value: US/Central
- name: NTFY_BASE_URL
value: https://ntfy.alexlebens.net
- name: NTFY_LISTEN_HTTP
value: :80
- name: NTFY_CACHE_FILE
value: /var/cache/ntfy/cache.db
- name: NTFY_CACHE_DURATION
value: 36h
- name: NTFY_CACHE_STARTUP_QUERIES
value: |
pragma journal_mode = WAL;
pragma synchronous = normal;
pragma temp_store = memory;
pragma busy_timeout = 15000;
vacuum;
- name: NTFY_BEHIND_PROXY
value: "true"
- name: NTFY_ATTACHMENT_CACHE_DIR
value: /var/cache/ntfy/attachments
- name: NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT
value: 4G
- name: NTFY_ATTACHMENT_FILE_SIZE_LIMIT
value: 15M
- name: NTFY_ATTACHMENT_EXPIRY_DURATION
value: 36h
- name: NTFY_ENABLE_SIGNUP
value: "false"
- name: NTFY_ENABLE_LOGIN
value: "true"
- name: NTFY_ENABLE_RESERVATIONS
value: "false"
- name: NTFY_AUTH_FILE
value: /var/cache/ntfy/user.db
- name: NTFY_AUTH_DEFAULT_ACCESS
value: deny-all
- name: NTFY_METRICS_LISTEN_HTTP
value: :9090
- name: NTFY_LOG_LEVEL
value: info
image: binwiederhier/ntfy:v2.15.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /var/cache/ntfy
name: cache
volumes:
- name: cache
persistentVolumeClaim:
claimName: ntfy
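# Note: with NTFY_ENABLE_LOGIN=true and NTFY_AUTH_DEFAULT_ACCESS=deny-all above, every topic is
# private by default; users and ACL grants are expected to be provisioned in the auth DB at
# /var/cache/ntfy/user.db (for example via the ntfy CLI inside the container) before clients can
# publish or subscribe.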
---
# Source: ntfy/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-ntfy
namespace: ntfy
labels:
app.kubernetes.io/name: http-route-ntfy
app.kubernetes.io/instance: ntfy
app.kubernetes.io/part-of: ntfy
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ntfy.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: ntfy
port: 80
weight: 100
---
# Source: ntfy/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: ntfy
namespace: ntfy
labels:
app.kubernetes.io/name: ntfy
app.kubernetes.io/instance: ntfy
app.kubernetes.io/part-of: ntfy
spec:
selector:
matchLabels:
app.kubernetes.io/name: ntfy
app.kubernetes.io/instance: ntfy
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

File diff suppressed because it is too large

@@ -1,100 +0,0 @@
---
# Source: omni-tools/charts/omni-tools/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: omni-tools
labels:
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: omni-tools
app.kubernetes.io/service: omni-tools
helm.sh/chart: omni-tools-4.4.0
namespace: omni-tools
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/name: omni-tools
---
# Source: omni-tools/charts/omni-tools/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: omni-tools
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: omni-tools
helm.sh/chart: omni-tools-4.4.0
namespace: omni-tools
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: omni-tools
app.kubernetes.io/instance: omni-tools
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/name: omni-tools
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: iib0011/omni-tools:0.6.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
---
# Source: omni-tools/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-omni-tools
namespace: omni-tools
labels:
app.kubernetes.io/name: http-route-omni-tools
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/part-of: omni-tools
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- omni-tools.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: omni-tools
port: 80
weight: 100


@@ -1,988 +0,0 @@
---
# Source: outline/charts/outline/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: outline
labels:
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: outline
app.kubernetes.io/service: outline
helm.sh/chart: outline-4.4.0
namespace: outline
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: outline
---
# Source: outline/charts/cloudflared-outline/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline-cloudflared-outline
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-outline
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-outline-1.23.1
namespace: outline
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-outline
app.kubernetes.io/instance: outline
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: cloudflared-outline
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: outline-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: outline/charts/outline/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: outline
helm.sh/chart: outline-4.4.0
namespace: outline
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: outline
app.kubernetes.io/instance: outline
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: outline
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: NODE_ENV
value: production
- name: URL
value: https://wiki.alexlebens.dev
- name: PORT
value: "3000"
- name: SECRET_KEY
valueFrom:
secretKeyRef:
key: secret-key
name: outline-key-secret
- name: UTILS_SECRET
valueFrom:
secretKeyRef:
key: utils-key
name: outline-key-secret
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
key: username
name: outline-postgresql-17-cluster-app
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_NAME
valueFrom:
secretKeyRef:
key: dbname
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_HOST
valueFrom:
secretKeyRef:
key: host
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_PORT
valueFrom:
secretKeyRef:
key: port
name: outline-postgresql-17-cluster-app
- name: DATABASE_URL
value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)
- name: DATABASE_URL_TEST
value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test
- name: DATABASE_CONNECTION_POOL_MIN
value: "2"
- name: DATABASE_CONNECTION_POOL_MAX
value: "20"
- name: PGSSLMODE
value: disable
- name: REDIS_URL
value: redis://redis-replication-outline-master.outline:6379
- name: FILE_STORAGE
value: s3
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: ceph-bucket-outline
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: ceph-bucket-outline
- name: AWS_REGION
value: us-east-1
- name: AWS_S3_UPLOAD_BUCKET_NAME
valueFrom:
configMapKeyRef:
key: BUCKET_NAME
name: ceph-bucket-outline
- name: AWS_S3_UPLOAD_BUCKET_URL
value: https://objects.alexlebens.dev
- name: AWS_S3_FORCE_PATH_STYLE
value: "true"
- name: AWS_S3_ACL
value: private
- name: FILE_STORAGE_UPLOAD_MAX_SIZE
value: "26214400"
- name: FORCE_HTTPS
value: "false"
- name: ENABLE_UPDATES
value: "false"
- name: WEB_CONCURRENCY
value: "1"
- name: FILE_STORAGE_IMPORT_MAX_SIZE
value: "5.12e+06"
- name: LOG_LEVEL
value: info
- name: DEFAULT_LANGUAGE
value: en_US
- name: RATE_LIMITER_ENABLED
value: "false"
- name: DEVELOPMENT_UNSAFE_INLINE_CSP
value: "false"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
key: client
name: outline-oidc-secret
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: secret
name: outline-oidc-secret
- name: OIDC_AUTH_URI
value: https://auth.alexlebens.dev/application/o/authorize/
- name: OIDC_TOKEN_URI
value: https://auth.alexlebens.dev/application/o/token/
- name: OIDC_USERINFO_URI
value: https://auth.alexlebens.dev/application/o/userinfo/
- name: OIDC_USERNAME_CLAIM
value: email
- name: OIDC_DISPLAY_NAME
value: Authentik
- name: OIDC_SCOPES
value: openid profile email
image: outlinewiki/outline:1.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
---
# Source: outline/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: outline-postgresql-17-cluster
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "outline-postgresql-17-external-backup"
serverName: "outline-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "outline-postgresql-17-garage-local-backup"
serverName: "outline-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-recovery"
serverName: outline-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: outline-postgresql-17-backup-1
externalClusters:
- name: outline-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "outline-postgresql-17-recovery"
serverName: outline-postgresql-17-backup-1
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-key-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-key-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/outline/key
metadataPolicy: None
property: secret-key
- secretKey: utils-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/outline/key
metadataPolicy: None
property: utils-key
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-oidc-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-oidc-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: client
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/outline
metadataPolicy: None
property: client
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/outline
metadataPolicy: None
property: secret
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-cloudflared-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-cloudflared-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/outline
metadataPolicy: None
property: token
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-postgresql-17-cluster-backup-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-postgresql-17-cluster-backup-secret-garage
namespace: outline
labels:
app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: outline/templates/object-bucket-claim.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-bucket-outline
labels:
app.kubernetes.io/name: ceph-bucket-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
generateBucketName: bucket-outline
storageClassName: ceph-bucket
additionalConfig:
bucketPolicy: |
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor",
"Effect": "Allow",
"Action": [
"s3:GetObjectAcl",
"s3:DeleteObject",
"s3:PutObject",
"s3:GetObject",
"s3:PutObjectAcl"
],
"Resource": "arn:aws:s3:::bucket-outline-630c57e0-d475-4d78-926c-c1c082291d73/*"
}
]
}
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-external-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-garage-local-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-recovery"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: outline/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: outline-postgresql-17-alert-rules
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/outline-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary instance.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 1
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 2
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
---
# Source: outline/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-outline
namespace: outline
labels:
app.kubernetes.io/name: redis-replication-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "outline-postgresql-17-daily-backup-scheduled-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: outline-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-external-backup"
---
# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "outline-postgresql-17-live-backup-scheduled-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: outline-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-garage-local-backup"
---
# Source: outline/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-outline
namespace: outline
labels:
app.kubernetes.io/name: redis-replication-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s


@@ -1,215 +0,0 @@
---
# Source: overseerr/charts/app-template/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: overseerr-main
labels:
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
helm.sh/chart: app-template-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: overseerr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: overseerr/charts/app-template/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: overseerr
labels:
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
app.kubernetes.io/service: overseerr
helm.sh/chart: app-template-4.4.0
namespace: overseerr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5055
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/name: overseerr
---
# Source: overseerr/charts/app-template/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: overseerr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
helm.sh/chart: app-template-4.4.0
namespace: overseerr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: overseerr
app.kubernetes.io/instance: overseerr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/name: overseerr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/sct/overseerr:1.34.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
volumeMounts:
- mountPath: /app/config
name: main
volumes:
- name: main
persistentVolumeClaim:
claimName: overseerr-main
---
# Source: overseerr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: overseerr-main-backup-secret
namespace: overseerr
labels:
app.kubernetes.io/name: overseerr-main-backup-secret
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/overseerr/overseerr-main"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: overseerr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-overseerr
namespace: overseerr
labels:
app.kubernetes.io/name: http-route-overseerr
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- overseerr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: overseerr
port: 80
weight: 100
---
# Source: overseerr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: overseerr-main-backup-source
namespace: overseerr
labels:
app.kubernetes.io/name: overseerr-main-backup-source
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
sourcePVC: overseerr-main
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: overseerr-main-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot


@@ -1,309 +0,0 @@
---
# Source: pgadmin4/charts/pgadmin4/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pgadmin4-data
labels:
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: pgadmin
helm.sh/chart: pgadmin4-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: pgadmin
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: pgadmin4/charts/pgadmin4/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: pgadmin
labels:
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: pgadmin
app.kubernetes.io/service: pgadmin
helm.sh/chart: pgadmin4-4.4.0
namespace: pgadmin
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/name: pgadmin
---
# Source: pgadmin4/charts/pgadmin4/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: pgadmin
helm.sh/chart: pgadmin4-4.4.0
namespace: pgadmin
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: pgadmin
app.kubernetes.io/instance: pgadmin
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/name: pgadmin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
/bin/chown -R 5050:5050 /var/lib/pgadmin
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-chmod-data
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /var/lib/pgadmin
name: data
containers:
- env:
- name: PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION
value: "False"
- name: PGADMIN_DEFAULT_EMAIL
value: alexanderlebens@gmail.com
- name: PGADMIN_DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
key: pgadmin-password
name: pgadmin-password-secret
envFrom:
- secretRef:
name: pgadmin-env-secret
image: dpage/pgadmin4:9.10
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
securityContext:
runAsGroup: 5050
runAsUser: 5050
volumeMounts:
- mountPath: /var/lib/pgadmin
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: pgadmin4-data
---
# Source: pgadmin4/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: pgadmin-password-secret
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-password-secret
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: pgadmin-password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/auth
metadataPolicy: None
property: pgadmin-password
---
# Source: pgadmin4/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: pgadmin-env-secret
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-env-secret
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: PGADMIN_CONFIG_AUTHENTICATION_SOURCES
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/env
metadataPolicy: None
property: PGADMIN_CONFIG_AUTHENTICATION_SOURCES
- secretKey: PGADMIN_CONFIG_OAUTH2_AUTO_CREATE_USER
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/env
metadataPolicy: None
property: PGADMIN_CONFIG_OAUTH2_AUTO_CREATE_USER
- secretKey: PGADMIN_CONFIG_OAUTH2_CONFIG
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/env
metadataPolicy: None
property: PGADMIN_CONFIG_OAUTH2_CONFIG
---
# Source: pgadmin4/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: pgadmin-data-backup-secret
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-data-backup-secret
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/pgadmin/pgadmin-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: pgadmin4/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-pgadmin
namespace: pgadmin
labels:
app.kubernetes.io/name: http-route-pgadmin
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- pgadmin.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: pgadmin
port: 80
weight: 100
---
# Source: pgadmin4/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: pgadmin-data-backup-source
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-data-backup-source
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
sourcePVC: pgadmin-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: pgadmin-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 5050
runAsGroup: 5050
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot


@@ -1,773 +0,0 @@
---
# Source: photoview/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: photoview-nfs-storage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-nfs-storage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Pictures
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: photoview/charts/photoview/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: photoview-cache
labels:
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: photoview/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: photoview-nfs-storage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-nfs-storage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
volumeName: photoview-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: photoview/charts/photoview/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: photoview
labels:
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
app.kubernetes.io/service: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/name: photoview
---
# Source: photoview/charts/photoview/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: photoview
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: photoview
app.kubernetes.io/instance: photoview
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/name: photoview
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
/bin/chown -R 999:999 /app/cache
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-chmod-data
resources:
requests:
cpu: 100m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /app/cache
name: cache
containers:
- env:
- name: PHOTOVIEW_DATABASE_DRIVER
value: postgres
- name: PHOTOVIEW_POSTGRES_URL
valueFrom:
secretKeyRef:
key: uri
name: photoview-postgresql-17-cluster-app
- name: PHOTOVIEW_MEDIA_CACHE
value: /app/cache
image: photoview/photoview:2.4.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
volumeMounts:
- mountPath: /app/cache
name: cache
- mountPath: /photos
name: media
readOnly: true
volumes:
- name: cache
persistentVolumeClaim:
claimName: photoview-cache
- name: media
persistentVolumeClaim:
claimName: photoview-nfs-storage
---
# Source: photoview/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: photoview-postgresql-17-cluster
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "photoview-postgresql-17-external-backup"
serverName: "photoview-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "photoview-postgresql-17-garage-local-backup"
serverName: "photoview-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-recovery"
serverName: photoview-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: photoview-postgresql-17-backup-1
externalClusters:
- name: photoview-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "photoview-postgresql-17-recovery"
serverName: photoview-postgresql-17-backup-1
---
# Source: photoview/templates/external-secrets.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: photoview-postgresql-17-cluster-backup-secret
namespace: photoview
labels:
app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: photoview/templates/external-secrets.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: photoview-postgresql-17-cluster-backup-secret-garage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: photoview/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-photoview
namespace: photoview
labels:
app.kubernetes.io/name: http-route-photoview
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- photoview.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: photoview
port: 80
weight: 100
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-external-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-garage-local-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-recovery"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: photoview/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: photoview-postgresql-17-alert-rules
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/photoview-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary instance.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 1
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 2
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
---
# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "photoview-postgresql-17-daily-backup-scheduled-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
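# CNPG ScheduledBackup schedules use six cron fields (seconds first), so "0 0 0 * * *" runs daily at midnight.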
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: photoview-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-external-backup"
---
# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "photoview-postgresql-17-live-backup-scheduled-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: photoview-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-garage-local-backup"

---
# Source: plex/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: plex-nfs-storage
namespace: plex
labels:
app.kubernetes.io/name: plex-nfs-storage
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
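# NFSv4.1 mount (vers=4, minorversion=1); noac disables attribute caching so metadata changes are seen immediately, at some performance cost.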
---
# Source: plex/charts/plex/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: plex-config
labels:
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Gi"
storageClassName: "ceph-block"
---
# Source: plex/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-nfs-storage
namespace: plex
labels:
app.kubernetes.io/name: plex-nfs-storage
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
volumeName: plex-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: plex/charts/plex/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: plex
labels:
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
app.kubernetes.io/service: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
type: LoadBalancer
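# Plex is published on a LoadBalancer service (port 32400) in addition to the HTTPRoute defined further below.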
ports:
- port: 32400
targetPort: 32400
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/name: plex
---
# Source: plex/charts/plex/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: plex
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: plex
app.kubernetes.io/instance: plex
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/name: plex
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: VERSION
value: docker
- name: PLEX_CLAIM
value: claim-XmGK2o9x54PbCzQaqj-J
image: ghcr.io/linuxserver/plex:1.42.2@sha256:ab81c7313fb5dc4d1f9562e5bbd5e5877a8a3c5ca6b9f9fff3437b5096a2b123
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 10m
gpu.intel.com/i915: 1
memory: 512Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /transcode
name: transcode
volumes:
- name: config
persistentVolumeClaim:
claimName: plex-config
- name: media
persistentVolumeClaim:
claimName: plex-nfs-storage
- emptyDir: {}
name: transcode
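# Transcode scratch space is a node-local emptyDir, so transcode data is ephemeral and discarded when the pod is removed.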
---
# Source: plex/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-plex
namespace: plex
labels:
app.kubernetes.io/name: http-route-plex
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- plex.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: plex
port: 32400
weight: 100

File diff suppressed because it is too large
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: prowlarr-config
labels:
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
helm.sh/chart: prowlarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: prowlarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: prowlarr
labels:
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
app.kubernetes.io/service: prowlarr
helm.sh/chart: prowlarr-4.4.0
namespace: prowlarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9696
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
---
# Source: prowlarr/charts/prowlarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: prowlarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prowlarr
helm.sh/chart: prowlarr-4.4.0
namespace: prowlarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: prowlarr
app.kubernetes.io/instance: prowlarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsUser: 568
supplementalGroups:
- 44
- 100
- 109
- 65539
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/linuxserver/prowlarr:2.3.0@sha256:475853535de3de8441b87c1457c30f2e695f4831228b12b6b7274e9da409d874
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: prowlarr-config
---
# Source: prowlarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: prowlarr-config-backup-secret
namespace: prowlarr
labels:
app.kubernetes.io/name: prowlarr-config-backup-secret
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/prowlarr/prowlarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: prowlarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-prowlarr
namespace: prowlarr
labels:
app.kubernetes.io/name: http-route-prowlarr
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- prowlarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: prowlarr
port: 80
weight: 100
---
# Source: prowlarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: prowlarr-config-backup-source
namespace: prowlarr
labels:
app.kubernetes.io/name: prowlarr-config-backup-source
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/part-of: prowlarr
spec:
sourcePVC: prowlarr-config
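# VolSync snapshots the prowlarr-config PVC nightly at 04:00 and pushes it with restic to the repository defined in prowlarr-config-backup-secret; the repository is pruned every 7 days.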
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: prowlarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups:
- 44
- 100
- 109
- 65539
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

File diff suppressed because it is too large
---
# Source: radarr-4k/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-4k-nfs-storage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-nfs-storage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-4k-config
labels:
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-4k
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-4k/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-4k-nfs-storage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-nfs-storage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
volumeName: radarr-4k-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-4k
labels:
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/service: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
namespace: radarr-4k
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/name: radarr-4k
---
# Source: radarr-4k/charts/radarr-4k/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-4k
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-4k
helm.sh/chart: radarr-4k-4.4.0
namespace: radarr-4k
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/name: radarr-4k
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
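# Sidecar: exportarr serves Prometheus metrics for Radarr on port 9793, reading the API key from the shared /config/config.xml.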
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-4k-config
- name: media
persistentVolumeClaim:
claimName: radarr-4k-nfs-storage
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-4k-postgresql-17-cluster
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
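# Two barman-cloud object stores: the external DigitalOcean Spaces target takes base backups only, while the local Garage store also archives WAL (isWALArchiver: true).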
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-external-backup"
serverName: "radarr5-4k-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup"
serverName: "radarr5-4k-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-recovery"
serverName: radarr5-4k-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-4k-postgresql-17-backup-1
externalClusters:
- name: radarr5-4k-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-recovery"
serverName: radarr5-4k-postgresql-17-backup-1
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-config-backup-secret
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-config-backup-secret
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-4k/radarr5-4k-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-postgresql-17-cluster-backup-secret
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-4k/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: http-route-radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-4k.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-4k
port: 80
weight: 100
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-external-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-garage-local-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-4k/radarr5-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-4k-postgresql-17-recovery"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-4k/radarr5-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
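# WAL and base backups are compressed with snappy (fast, modest ratio) and transferred serially (maxParallel: 1, jobs: 1).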
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-4k-postgresql-17-alert-rules
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-4k-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-4k"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-4k", persistentvolumeclaim=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-4k",pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-4k/radarr5-4k-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-4k", pod=~"radarr5-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-4k
cnpg_cluster: radarr5-4k-postgresql-17-cluster
---
# Source: radarr-4k/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
groups:
- name: radarr-4k
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr 4K Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-4k.*"} == 1)
for: 5m
labels:
severity: critical
- alert: Radarr4kDown
annotations:
description: Radarr 4K service is down.
summary: Radarr 4K is down.
expr: |
radarr_4k_system_status{job=~".*radarr-4k.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-4k/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-4k-config-backup-source
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k-config-backup-source
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
sourcePVC: radarr-4k-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-4k-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-4k-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-external-backup"
---
# Source: radarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-4k-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-4k-postgresql-17
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-4k-postgresql-17-garage-local-backup"
---
# Source: radarr-4k/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-4k
namespace: radarr-4k
labels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
app.kubernetes.io/part-of: radarr-4k
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-4k
app.kubernetes.io/instance: radarr-4k
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

---
# Source: radarr-anime/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-anime-nfs-storage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-nfs-storage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-anime-config
labels:
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-anime
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-anime/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-anime-nfs-storage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-nfs-storage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
volumeName: radarr-anime-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-anime
labels:
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/service: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
namespace: radarr-anime
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/name: radarr-anime
---
# Source: radarr-anime/charts/radarr-anime/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-anime
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-anime
helm.sh/chart: radarr-anime-4.4.0
namespace: radarr-anime
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/name: radarr-anime
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-anime-config
- name: media
persistentVolumeClaim:
claimName: radarr-anime-nfs-storage
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-anime-postgresql-17-cluster
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-external-backup"
serverName: "radarr5-anime-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup"
serverName: "radarr5-anime-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-recovery"
serverName: radarr5-anime-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-anime-postgresql-17-backup-1
externalClusters:
- name: radarr5-anime-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-recovery"
serverName: radarr5-anime-postgresql-17-backup-1
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-config-backup-secret
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-config-backup-secret
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-anime/radarr5-anime-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-postgresql-17-cluster-backup-secret
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-anime/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: http-route-radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-anime.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-anime
port: 80
weight: 100
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-external-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-garage-local-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-anime/radarr5-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-anime-postgresql-17-recovery"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-anime/radarr5-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-anime-postgresql-17-alert-rules
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-anime-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-anime"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster has high replication lag.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
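# The two disk-space alerts below evaluate the data PVCs, the WAL PVCs (suffix -wal) and any
# tablespace PVCs (suffix -tbs*) separately via kubelet_volume_stats_* metrics, firing above
# 90% usage (critical) and 70% usage (warning).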
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-anime", persistentvolumeclaim=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-anime",pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions since the frozen XID
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
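# The zone-spread rule below joins kube_pod_info with kube_node_labels to count the distinct
# topology.kubernetes.io/zone values covered by the cluster pods, and fires when the instances
# span fewer than three zones.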
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-anime/radarr5-anime-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-anime", pod=~"radarr5-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-anime
cnpg_cluster: radarr5-anime-postgresql-17-cluster
---
# Source: radarr-anime/templates/prometheus-rule.yaml
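# ExportarrAbsent uses absent(up{...} == 1), so it fires both when the Exportarr target drops out
# of Prometheus service discovery entirely and when it is still discovered but reporting down.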
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
groups:
- name: radarr-anime
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Anime Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-anime.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrAnimeDown
annotations:
description: Radarr Anime service is down.
summary: Radarr Anime is down.
expr: |
radarr_anime_system_status{job=~".*radarr-anime.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-anime/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-anime-config-backup-source
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime-config-backup-source
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
sourcePVC: radarr-anime-config
trigger:
schedule: 0 4 * * *
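# Standard 5-field cron: a restic backup of the config PVC is taken daily at 04:00.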
restic:
pruneIntervalDays: 7
repository: radarr-anime-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-anime-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-external-backup"
---
# Source: radarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
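# This second schedule targets the local Garage object store and, unlike the daily off-site backup
# above, also runs an immediate backup as soon as the resource is created (immediate: true).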
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-anime-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-anime-postgresql-17
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-anime-postgresql-17-garage-local-backup"
---
# Source: radarr-anime/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-anime
namespace: radarr-anime
labels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
app.kubernetes.io/part-of: radarr-anime
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-anime
app.kubernetes.io/instance: radarr-anime
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics


@@ -1,928 +0,0 @@
---
# Source: radarr-standup/templates/persistent-volume.yaml
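# Statically provisioned NFS volume; the matching PVC below binds to it explicitly via volumeName.
# The noac mount option disables NFS attribute caching, trading some performance for consistency
# across clients that share this export.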
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-standup-nfs-storage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-nfs-storage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-standup-config
labels:
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr-standup
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr-standup/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-standup-nfs-storage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-nfs-storage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
volumeName: radarr-standup-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr-standup
labels:
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/service: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
namespace: radarr-standup
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/name: radarr-standup
---
# Source: radarr-standup/charts/radarr-standup/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr-standup
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr-standup
helm.sh/chart: radarr-standup-4.4.0
namespace: radarr-standup
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/name: radarr-standup
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
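# Exportarr sidecar: URL/CONFIG point it at the local Radarr instance and its mounted config.xml
# (to read the API key), and it serves Prometheus metrics on port 9793, matching the Service's
# metrics port.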
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-standup-config
- name: media
persistentVolumeClaim:
claimName: radarr-standup-nfs-storage
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/cluster.yaml
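# Three-instance CNPG cluster. The barman-cloud plugin is configured twice: the Garage-local object
# store acts as the WAL archiver, while the external (DigitalOcean Spaces) store only receives base
# backups. The bootstrap section restores the initial database from the
# "radarr5-standup-postgresql-17-recovery" object store under serverName
# radarr5-standup-postgresql-17-backup-1.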
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-standup-postgresql-17-cluster
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-external-backup"
serverName: "radarr5-standup-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup"
serverName: "radarr5-standup-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-recovery"
serverName: radarr5-standup-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-standup-postgresql-17-backup-1
externalClusters:
- name: radarr5-standup-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-recovery"
serverName: radarr5-standup-postgresql-17-backup-1
---
# Source: radarr-standup/templates/external-secret.yaml
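# The target template composes RESTIC_REPOSITORY from the BUCKET_ENDPOINT value pulled from Vault;
# mergePolicy: Merge keeps that templated key alongside the raw credential keys listed under data.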
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-config-backup-secret
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-config-backup-secret
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5-standup/radarr5-standup-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-postgresql-17-cluster-backup-secret
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr-standup/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr-standup/templates/http-route.yaml
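# Gateway API route: attaches to the shared traefik-gateway in the traefik namespace and forwards
# all paths on radarr-standup.alexlebens.net to the radarr-standup Service on port 80.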
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: http-route-radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr-standup.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr-standup
port: 80
weight: 100
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
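# Off-site backup target on DigitalOcean Spaces with a 30-day retention policy; the Garage-local
# store defined next keeps only 3 days of backups.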
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-external-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-garage-local-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr-standup/radarr5-standup-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-standup-postgresql-17-recovery"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5-standup/radarr5-standup-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-standup-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-standup-postgresql-17-alert-rules
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-standup-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr-standup"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr-standup"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster has high replication lag.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr-standup", persistentvolumeclaim=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr-standup",pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions since the frozen XID
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr-standup/radarr5-standup-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr-standup", pod=~"radarr5-standup-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr-standup
cnpg_cluster: radarr5-standup-postgresql-17-cluster
---
# Source: radarr-standup/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
groups:
- name: radarr-standup
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Stand Up Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr-standup.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrStandUpDown
annotations:
description: Radarr Stand Up service is down.
summary: Radarr Stand Up is down.
expr: |
radarr_standup_system_status{job=~".*radarr-standup.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr-standup/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-standup-config-backup-source
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup-config-backup-source
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
sourcePVC: radarr-standup-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-standup-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-standup-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-standup-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-external-backup"
---
# Source: radarr-standup/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-standup-postgresql-17-live-backup-scheduled-backup"
namespace: radarr-standup
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-standup-postgresql-17
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-standup-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-standup-postgresql-17-garage-local-backup"
---
# Source: radarr-standup/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr-standup
namespace: radarr-standup
labels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
app.kubernetes.io/part-of: radarr-standup
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr-standup
app.kubernetes.io/instance: radarr-standup
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics


@@ -1,930 +0,0 @@
---
# Source: radarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: radarr-nfs-storage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-nfs-storage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: radarr/charts/radarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-config
labels:
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
helm.sh/chart: radarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: radarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: radarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-nfs-storage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-nfs-storage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
volumeName: radarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: radarr/charts/radarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: radarr
labels:
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
app.kubernetes.io/service: radarr
helm.sh/chart: radarr-4.4.0
namespace: radarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 7878
protocol: TCP
name: http
- port: 9793
targetPort: 9793
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
---
# Source: radarr/charts/radarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: radarr
helm.sh/chart: radarr-4.4.0
namespace: radarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/radarr:6.0.4@sha256:06ac318ecb95a34c7b229568dcb4271f02cb5007bb189a0dd67a2032864187ca
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- radarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9793"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-config
- name: media
persistentVolumeClaim:
claimName: radarr-nfs-storage
---
# Source: radarr/charts/postgres-17-cluster/templates/cluster.yaml
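# Same shape as the other Radarr clusters, but here the external backup plugin writes under a new
# serverName (radarr5-postgresql-17-backup-2), presumably so it does not overwrite the
# radarr5-postgresql-17-backup-1 history that the bootstrap recovery below still restores from.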
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: radarr5-postgresql-17-cluster
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-postgresql-17-external-backup"
serverName: "radarr5-postgresql-17-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "radarr5-postgresql-17-garage-local-backup"
serverName: "radarr5-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-recovery"
serverName: radarr5-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: radarr5-postgresql-17-backup-1
externalClusters:
- name: radarr5-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "radarr5-postgresql-17-recovery"
serverName: radarr5-postgresql-17-backup-1
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-config-backup-secret
namespace: radarr
labels:
app.kubernetes.io/name: radarr-config-backup-secret
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/radarr5/radarr5-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-postgresql-17-cluster-backup-secret
namespace: radarr
labels:
app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: radarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: radarr-postgresql-17-cluster-backup-secret-garage
namespace: radarr
labels:
app.kubernetes.io/name: radarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: radarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-radarr
namespace: radarr
labels:
app.kubernetes.io/name: http-route-radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- radarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: radarr
port: 80
weight: 100
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-external-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/radarr5/radarr5-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-garage-local-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr/radarr5-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: radarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "radarr5-postgresql-17-recovery"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/radarr5/radarr5-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: radarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: radarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr5-postgresql-17-alert-rules
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/radarr5-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 1
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="radarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="radarr"}) < 2
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance is critically close to its maximum number of connections!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="radarr", persistentvolumeclaim=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="radarr",pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "radarr/radarr5-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="radarr", pod=~"radarr5-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: radarr
cnpg_cluster: radarr5-postgresql-17-cluster
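# Worked example for the connection alerts above (hypothetical numbers, not measured on this cluster):
# with max_connections = 100 and cnpg_backends_total reporting 85 backends, the expression evaluates to
# 85 / 100 * 100 = 85, firing CNPGClusterHighConnectionsWarning (> 80) but not the critical alert (> 95).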
---
# Source: radarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: radarr
namespace: radarr
labels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
groups:
- name: radarr
rules:
- alert: ExportarrAbsent
annotations:
description: Radarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*radarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: RadarrDown
annotations:
description: Radarr service is down.
summary: Radarr is down.
expr: |
radarr_system_status{job=~".*radarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: radarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: radarr-config-backup-source
namespace: radarr
labels:
app.kubernetes.io/name: radarr-config-backup-source
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
sourcePVC: radarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: radarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
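# Note: this VolSync source snapshots the radarr-config PVC daily at 04:00 and pushes it with restic to
# the repository named in radarr-config-backup-secret (presumably templated the same way as the other
# *-backup-secret resources in this repo); the retain block keeps 1 hourly, 3 daily, 2 weekly, 2 monthly
# and 4 yearly snapshots, with a prune pass every 7 days.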
---
# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-postgresql-17-daily-backup-scheduled-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-external-backup"
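# Note: CloudNativePG ScheduledBackup schedules use a six-field cron expression with seconds first, so
# "0 0 0 * * *" above means once a day at 00:00:00; the same format applies to the live-backup schedule
# below.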
---
# Source: radarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "radarr5-postgresql-17-live-backup-scheduled-backup"
namespace: radarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: radarr5-postgresql-17
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: radarr5-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "radarr5-postgresql-17-garage-local-backup"
---
# Source: radarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: radarr
namespace: radarr
labels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/part-of: radarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: radarr
app.kubernetes.io/instance: radarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics
File diff suppressed because it is too large
@@ -1,303 +0,0 @@
---
# Source: reloader/charts/reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader
namespace: reloader
---
# Source: reloader/charts/reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-role
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- list
- get
- watch
- apiGroups:
- "apps"
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- "batch"
resources:
- cronjobs
verbs:
- list
- get
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- create
- delete
- list
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
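# Note: this ClusterRole lets Reloader watch Secrets and ConfigMaps cluster-wide and patch the
# Deployments, DaemonSets and StatefulSets that reference them so they roll when the data changes; the
# batch permissions (get/list CronJobs, create/delete Jobs) are presumably there so CronJob workloads can
# be re-run on changes, an assumption since the chart only grants the verbs.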
---
# Source: reloader/charts/reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-reloader-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: reloader
---
# Source: reloader/charts/reloader/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-metadata-role
namespace: reloader
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
---
# Source: reloader/charts/reloader/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-metadata-role-binding
namespace: reloader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: reloader-reloader-metadata-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: reloader
---
# Source: reloader/charts/reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
group: com.stakater.platform
provider: stakater
version: v1.4.10
name: reloader-reloader
namespace: reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
group: com.stakater.platform
provider: stakater
version: v1.4.10
spec:
containers:
- image: "ghcr.io/stakater/reloader:v1.4.10"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
resource: limits.cpu
divisor: '1'
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
securityContext:
{}
args:
- "--log-level=info"
securityContext:
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccountName: reloader-reloader
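# Note: GOMAXPROCS and GOMEMLIMIT above are filled from limits.cpu / limits.memory via the downward API,
# but this container declares no resource limits; in that case Kubernetes substitutes the node's
# allocatable CPU and memory, so the Go runtime is effectively sized to the node (behaviour assumed from
# the downward-API defaulting rules, not stated in this chart).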
---
# Source: reloader/charts/reloader/templates/podmonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader
namespace: reloader
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
honorLabels: true
jobLabel: reloader-reloader
namespaceSelector:
matchNames:
- reloader
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,559 +0,0 @@
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-digital-ocean
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-digital-ocean
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-garage-local
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-local
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-garage-remote
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-remote
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-ceph-directus
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-ceph-directus
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-ceph-directus
labels:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: ceph-directus
- name: S3_ENDPOINT
valueFrom:
secretKeyRef:
key: BUCKET_HOST
name: s3-ceph-directus-secret
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-ceph-directus-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-ceph-directus-secret
- name: S3_REGION
value: us-east-1
- name: LOG_LEVEL
value: info
- name: S3_FORCE_PATH_STYLE
value: "true"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-digital-ocean
labels:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: digital-ocean
- name: S3_ENDPOINT
value: https://nyc3.digitaloceanspaces.com
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-do-home-infra-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-do-home-infra-secret
- name: S3_REGION
valueFrom:
secretKeyRef:
key: AWS_REGION
name: s3-do-home-infra-secret
- name: LOG_LEVEL
value: info
- name: S3_FORCE_PATH_STYLE
value: "false"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-garage-local
labels:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: garage-local
- name: S3_ENDPOINT
value: http://garage-main.garage:3900
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-garage-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-garage-secret
- name: S3_REGION
value: us-east-1
- name: LOG_LEVEL
value: debug
- name: S3_FORCE_PATH_STYLE
value: "true"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-garage-remote
labels:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: garage-remote
- name: S3_ENDPOINT
value: https://garage-ps10rp.boreal-beaufort.ts.net:3900
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-garage-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-garage-secret
- name: S3_REGION
value: us-east-1
- name: LOG_LEVEL
value: debug
- name: S3_FORCE_PATH_STYLE
value: "true"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
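# Note: S3_FORCE_PATH_STYLE is "true" for the Ceph and Garage exporters and "false" for the DigitalOcean
# Spaces one, matching the usual expectation that self-hosted S3 endpoints are addressed path-style while
# Spaces uses virtual-hosted bucket URLs (an assumption about the exporter's S3 client, not something
# these manifests state).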
---
# Source: s3-exporter/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: s3-do-home-infra-secret
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-do-home-infra-secret
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/all-access
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/all-access
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
- secretKey: AWS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/prometheus-exporter
metadataPolicy: None
property: AWS_REGION
---
# Source: s3-exporter/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: s3-ceph-directus-secret
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-ceph-directus-secret
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/directus/ceph
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/directus/ceph
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
- secretKey: BUCKET_HOST
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/directus/ceph
metadataPolicy: None
property: BUCKET_HOST
---
# Source: s3-exporter/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: s3-garage-secret
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-garage-secret
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/s3-exporter
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/s3-exporter
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-digital-ocean
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-digital-ocean
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-ceph-directus
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-ceph-directus
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-garage-local
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-local
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-garage-remote
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-remote
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
@@ -1,435 +0,0 @@
---
# Source: searxng/charts/searxng/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-api-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: searxng/charts/searxng/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-browser-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-api
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-api
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-browser
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-browser
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-api
labels:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: api
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: http://searxng-api.searxng:8080
- name: SEARXNG_QUERY_URL
value: http://searxng-api.searxng:8080/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng-api.searxng
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
- name: ENABLE_RAG_WEB_SEARCH
value: "true"
- name: RAG_WEB_SEARCH_ENGINE
value: searxng
- name: RAG_WEB_SEARCH_RESULT_COUNT
value: "3"
- name: RAG_WEB_SEARCH_CONCURRENT_REQUESTS
value: "10"
image: searxng/searxng:latest@sha256:09dfc123bd7c118ed086471b42d17ed57964827beffeb8d7f012dae3d2608545
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: api-data
- mountPath: /etc/searxng/settings.yml
mountPropagation: None
name: config
readOnly: true
subPath: settings.yml
- mountPath: /etc/searxng/limiter.toml
mountPropagation: None
name: config
readOnly: true
subPath: limiter.toml
volumes:
- name: api-data
persistentVolumeClaim:
claimName: searxng-api-data
- name: config
secret:
secretName: searxng-api-config-secret
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-browser
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: browser
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: https://searxng.alexlebens.net/
- name: SEARXNG_QUERY_URL
value: https://searxng.alexlebens.net/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng.alexlebens.net
- name: SEARXNG_REDIS_URL
value: redis://redis-replication-searxng-master.searxng:6379/0
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
image: searxng/searxng:latest@sha256:09dfc123bd7c118ed086471b42d17ed57964827beffeb8d7f012dae3d2608545
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: browser-data
volumes:
- name: browser-data
persistentVolumeClaim:
claimName: searxng-browser-data
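# Note: only the browser instance is given SEARXNG_REDIS_URL, pointing at
# redis-replication-searxng-master.searxng (presumably a Service created by the RedisReplication resource
# further down); the api instance runs without a Redis backend and is only reachable in-cluster at
# searxng-api.searxng:8080.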
---
# Source: searxng/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: searxng-api-config-secret
namespace: searxng
labels:
app.kubernetes.io/name: searxng-api-config-secret
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: settings.yml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/searxng/api/config
metadataPolicy: None
property: settings.yml
- secretKey: limiter.toml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/searxng/api/config
metadataPolicy: None
property: limiter.toml
---
# Source: searxng/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: searxng-browser-data-backup-secret
namespace: searxng
labels:
app.kubernetes.io/name: searxng-browser-data-backup-secret
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/searxng/searxng-browser-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: searxng/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-searxng
namespace: searxng
labels:
app.kubernetes.io/name: http-route-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- searxng.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: searxng-browser
port: 80
weight: 100
---
# Source: searxng/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-searxng
namespace: searxng
labels:
app.kubernetes.io/name: redis-replication-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: searxng/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: searxng-browser-data-backup-source
namespace: searxng
labels:
app.kubernetes.io/name: searxng-browser-data-backup-source
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
sourcePVC: searxng-browser-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: searxng-browser-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: searxng/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-searxng
namespace: searxng
labels:
app.kubernetes.io/name: redis-replication-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s
@@ -1,185 +0,0 @@
---
# Source: shelly-plug/charts/shelly-plug/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: shelly-plug
labels:
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: shelly-plug
helm.sh/chart: shelly-plug-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: shelly-plug
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "1Gi"
storageClassName: "ceph-block"
---
# Source: shelly-plug/charts/shelly-plug/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: shelly-plug
labels:
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/service: shelly-plug
helm.sh/chart: shelly-plug-4.4.0
namespace: shelly-plug
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/name: shelly-plug
---
# Source: shelly-plug/charts/shelly-plug/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: shelly-plug
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: shelly-plug
helm.sh/chart: shelly-plug-4.4.0
namespace: shelly-plug
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/instance: shelly-plug
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/name: shelly-plug
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
cd /var/www/html
if [ -d ".git" ]; then
echo "Git repository found. Pulling latest changes..."
git pull
else
echo "Not a git repository. Initializing ..."
git init
git remote add origin https://github.com/geerlingguy/shelly-plug-prometheus.git
git fetch origin
git checkout origin/master -ft
fi
image: alpine/git:latest
imagePullPolicy: IfNotPresent
name: init-fetch-repo
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /var/www/html
name: script
containers:
- env:
- name: SHELLY_HOSTNAME
value: it05sp.alexlebens.net
- name: SHELLY_GENERATION
value: "2"
envFrom:
- secretRef:
name: shelly-plug-config-secret
image: php:8.4.15-apache-bookworm
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
volumeMounts:
- mountPath: /var/www/html
name: script
volumes:
- name: script
persistentVolumeClaim:
claimName: shelly-plug
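# Note: the init container clones or pulls geerlingguy/shelly-plug-prometheus into the shared PVC, and
# the php:apache container then serves that script for the ServiceMonitor to scrape at /metrics on port
# 80; SHELLY_GENERATION "2" targets a Gen2 plug (the exporter's environment contract is assumed from that
# repository, not restated here).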
---
# Source: shelly-plug/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: shelly-plug-config-secret
namespace: shelly-plug
labels:
app.kubernetes.io/name: shelly-plug-config-secret
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/part-of: shelly-plug
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: SHELLY_HTTP_USERNAME
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /shelly-plug/auth/it05sp
metadataPolicy: None
property: SHELLY_HTTP_USERNAME
- secretKey: SHELLY_HTTP_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /shelly-plug/auth/it05sp
metadataPolicy: None
property: SHELLY_HTTP_PASSWORD
---
# Source: shelly-plug/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: shelly-plug
namespace: shelly-plug
labels:
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/part-of: shelly-plug
spec:
selector:
matchLabels:
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/instance: shelly-plug
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
@@ -1,153 +0,0 @@
---
# Source: site-documentation/charts/site-documentation/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: site-documentation
labels:
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-documentation
app.kubernetes.io/service: site-documentation
helm.sh/chart: site-documentation-4.4.0
namespace: site-documentation
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4321
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: site-documentation
---
# Source: site-documentation/charts/cloudflared-site/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-documentation-cloudflared-site
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-site-1.23.1
namespace: site-documentation
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/instance: site-documentation
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: cloudflared-site
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: site-documentation-cloudflared-api-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-documentation/charts/site-documentation/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-documentation
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-documentation
helm.sh/chart: site-documentation-4.4.0
namespace: site-documentation
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: site-documentation
app.kubernetes.io/instance: site-documentation
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: site-documentation
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: harbor.alexlebens.net/images/site-documentation:0.0.4
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-documentation/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: site-documentation-cloudflared-api-secret
namespace: site-documentation
labels:
app.kubernetes.io/name: site-documentation-cloudflared-api-secret
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/part-of: site-documentation
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/site-documentation
metadataPolicy: None
property: token
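# Note: site-documentation is published through a Cloudflare managed tunnel: the cloudflared Deployment
# runs the tunnel with the token from the ExternalSecret above, and no HTTPRoute or Ingress is rendered
# for this site, so the tunnel appears to be its only external entry point (site-profile below follows
# the same pattern).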
@@ -1,153 +0,0 @@
---
# Source: site-profile/charts/site-profile/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: site-profile
labels:
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-profile
app.kubernetes.io/service: site-profile
helm.sh/chart: site-profile-4.4.0
namespace: site-profile
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4321
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: site-profile
---
# Source: site-profile/charts/cloudflared-site/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-profile-cloudflared-site
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-site-1.23.1
namespace: site-profile
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/instance: site-profile
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: cloudflared-site
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: site-profile-cloudflared-api-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-profile/charts/site-profile/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-profile
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-profile
helm.sh/chart: site-profile-4.4.0
namespace: site-profile
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: site-profile
app.kubernetes.io/instance: site-profile
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: site-profile
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: harbor.alexlebens.net/images/site-profile:2.1.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-profile/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: site-profile-cloudflared-api-secret
namespace: site-profile
labels:
app.kubernetes.io/name: site-profile-cloudflared-api-secret
app.kubernetes.io/instance: site-profile
app.kubernetes.io/part-of: site-profile
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/site-profile
metadataPolicy: None
property: token
@@ -1,396 +0,0 @@
---
# Source: slskd/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: slskd
labels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: slskd/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: slskd-nfs-storage
namespace: slskd
labels:
app.kubernetes.io/name: slskd-nfs-storage
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: slskd/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: slskd-nfs-storage
namespace: slskd
labels:
app.kubernetes.io/name: slskd-nfs-storage
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
volumeName: slskd-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: slskd/charts/slskd/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: slskd
labels:
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
app.kubernetes.io/service: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
type: ClusterIP
ports:
- port: 5030
targetPort: 5030
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
---
# Source: slskd/charts/slskd/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: slskd-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- args:
- -ec
- |
sysctl -w net.ipv4.ip_forward=1;
sysctl -w net.ipv6.conf.all.disable_ipv6=1
command:
- /bin/sh
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-sysctl
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
privileged: true
containers:
- env:
- name: VPN_SERVICE_PROVIDER
value: protonvpn
- name: VPN_TYPE
value: wireguard
- name: WIREGUARD_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: private-key
name: slskd-wireguard-conf
- name: VPN_PORT_FORWARDING
value: "on"
- name: PORT_FORWARD_ONLY
value: "on"
- name: FIREWALL_OUTBOUND_SUBNETS
value: 192.168.1.0/24,10.244.0.0/16
- name: FIREWALL_INPUT_PORTS
value: 5030,50300
- name: DOT
value: "off"
image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
imagePullPolicy: IfNotPresent
name: gluetun
resources:
limits:
devic.es/tun: "1"
requests:
cpu: 10m
devic.es/tun: "1"
memory: 128Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: SLSKD_UMASK
value: "0"
image: slskd/slskd:0.24.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 512Mi
volumeMounts:
- mountPath: /mnt/store
name: data
- mountPath: /app/slskd.yml
mountPropagation: None
name: slskd-config
readOnly: true
subPath: slskd.yml
volumes:
- name: data
persistentVolumeClaim:
claimName: slskd-nfs-storage
- name: slskd-config
secret:
secretName: slskd-config-secret
---
# Source: slskd/charts/slskd/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: slskd-soularr
labels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
template:
metadata:
labels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: SCRIPT_INTERVAL
value: "300"
image: mrusse08/soularr:latest@sha256:71a0b9e5a522d76bb0ffdb6d720d681fde22417b3a5acc9ecae61c89d05d8afc
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /mnt/store
name: data
- mountPath: /data/config.ini
mountPropagation: None
name: soularr-config
readOnly: true
subPath: config.ini
volumes:
- name: data
persistentVolumeClaim:
claimName: slskd-nfs-storage
- name: soularr-config
secret:
secretName: soularr-config-secret
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: slskd-config-secret
namespace: slskd
labels:
app.kubernetes.io/name: slskd-config-secret
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: slskd.yml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/slskd/config
metadataPolicy: None
property: slskd.yml
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: soularr-config-secret
namespace: slskd
labels:
app.kubernetes.io/name: soularr-config-secret
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: config.ini
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/slskd/soularr
metadataPolicy: None
property: config.ini
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: slskd-wireguard-conf
namespace: slskd
labels:
app.kubernetes.io/name: slskd-wireguard-conf
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /protonvpn/conf/cl01tl
metadataPolicy: None
property: private-key
---
# Source: slskd/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-slskd
namespace: slskd
labels:
app.kubernetes.io/name: http-route-slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- slskd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: slskd
port: 5030
weight: 100
---
# Source: slskd/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: slskd
namespace: slskd
labels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
selector:
matchLabels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
endpoints:
- port: http
interval: 3m
scrapeTimeout: 1m
path: /metrics

@@ -1,928 +0,0 @@
---
# Source: sonarr-4k/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-4k-nfs-storage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-nfs-storage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-4k-config
labels:
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr-4k
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr-4k/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-4k-nfs-storage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-nfs-storage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
volumeName: sonarr-4k-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr-4k
labels:
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/service: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
namespace: sonarr-4k
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/name: sonarr-4k
---
# Source: sonarr-4k/charts/sonarr-4k/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr-4k
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-4k
helm.sh/chart: sonarr-4k-4.4.0
namespace: sonarr-4k
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/name: sonarr-4k
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
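        # exportarr sidecar: reads the Sonarr port and API key from the mounted config.xml
        # and exposes Prometheus metrics on port 9794.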
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-4k-config
- name: media
persistentVolumeClaim:
claimName: sonarr-4k-nfs-storage
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-4k-postgresql-17-cluster
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
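  # Two barman-cloud object stores are attached: an off-site backup target and a
  # local Garage store that also archives WAL (isWALArchiver: true).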
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-external-backup"
serverName: "sonarr4-4k-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup"
serverName: "sonarr4-4k-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-recovery"
serverName: sonarr4-4k-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
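  # Bootstraps by restoring from the recovery object store referenced below rather than running initdb.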
bootstrap:
recovery:
database: app
source: sonarr4-4k-postgresql-17-backup-1
externalClusters:
- name: sonarr4-4k-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-recovery"
serverName: sonarr4-4k-postgresql-17-backup-1
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-config-backup-secret
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-config-backup-secret
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
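  # RESTIC_REPOSITORY is templated from the fetched BUCKET_ENDPOINT and merged with
  # the remaining keys pulled from Vault below.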
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-4k/sonarr4-4k-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-postgresql-17-cluster-backup-secret
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr-4k/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr-4k/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: http-route-sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr-4k.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr-4k
port: 80
weight: 100
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-external-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-garage-local-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-4k-postgresql-17-recovery"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-4k/sonarr4-4k-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-4k-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-4k-postgresql-17-alert-rules
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-4k-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
              In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-4k"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-4k"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-4k", persistentvolumeclaim=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr-4k",pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances in the same zone.
description: |-
CloudNativePG Cluster "sonarr-4k/sonarr4-4k-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-4k", pod=~"sonarr4-4k-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr-4k
cnpg_cluster: sonarr4-4k-postgresql-17-cluster
---
# Source: sonarr-4k/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
groups:
- name: sonarr-4k
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr 4K Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr-4k.*"} == 1)
for: 5m
labels:
severity: critical
- alert: Sonarr4KDown
annotations:
description: Sonarr 4K service is down.
summary: Sonarr 4K is down.
expr: |
sonarr_4k_system_status{job=~".*sonarr-4k.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr-4k/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-4k-config-backup-source
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k-config-backup-source
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
sourcePVC: sonarr-4k-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-4k-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-4k-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-external-backup"
---
# Source: sonarr-4k/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-4k-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr-4k
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-4k-postgresql-17
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-4k-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-4k-postgresql-17-garage-local-backup"
---
# Source: sonarr-4k/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr-4k
namespace: sonarr-4k
labels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
app.kubernetes.io/part-of: sonarr-4k
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr-4k
app.kubernetes.io/instance: sonarr-4k
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

@@ -1,928 +0,0 @@
---
# Source: sonarr-anime/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-anime-nfs-storage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-nfs-storage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-anime-config
labels:
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr-anime
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr-anime/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-anime-nfs-storage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-nfs-storage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
volumeName: sonarr-anime-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr-anime
labels:
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/service: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
namespace: sonarr-anime
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/name: sonarr-anime
---
# Source: sonarr-anime/charts/sonarr-anime/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr-anime
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr-anime
helm.sh/chart: sonarr-anime-4.4.0
namespace: sonarr-anime
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/name: sonarr-anime
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-anime-config
- name: media
persistentVolumeClaim:
claimName: sonarr-anime-nfs-storage
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-anime-postgresql-17-cluster
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-external-backup"
serverName: "sonarr4-anime-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup"
serverName: "sonarr4-anime-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-recovery"
serverName: sonarr4-anime-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 512Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-anime-postgresql-17-backup-1
externalClusters:
- name: sonarr4-anime-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-recovery"
serverName: sonarr4-anime-postgresql-17-backup-1
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-config-backup-secret
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-config-backup-secret
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4-anime/sonarr4-anime-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-postgresql-17-cluster-backup-secret
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr-anime/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr-anime/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: http-route-sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr-anime.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr-anime
port: 80
weight: 100
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-external-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-garage-local-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-anime-postgresql-17-recovery"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr-anime/sonarr4-anime-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-anime-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-anime-postgresql-17-alert-rules
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-anime-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
              risk of data loss and downtime if the primary instance fails.
              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
              instances. The replaced instance may need some time to catch up with the cluster primary instance.
              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
              case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
              In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr-anime"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr-anime"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr-anime", persistentvolumeclaim=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr-anime",pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "sonarr-anime/sonarr4-anime-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr-anime", pod=~"sonarr4-anime-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr-anime
cnpg_cluster: sonarr4-anime-postgresql-17-cluster
---
# Source: sonarr-anime/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
groups:
- name: sonarr-anime
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr Anime Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr-anime.*"} == 1)
for: 5m
labels:
severity: critical
- alert: SonarrAnimeDown
annotations:
description: Sonarr Anime service is down.
summary: Sonarr Anime is down.
expr: |
sonarr_anime_system_status{job=~".*sonarr-anime.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr-anime/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-anime-config-backup-source
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime-config-backup-source
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
sourcePVC: sonarr-anime-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-anime-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-anime-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-external-backup"
---
# Source: sonarr-anime/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-anime-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr-anime
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-anime-postgresql-17
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-anime-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-anime-postgresql-17-garage-local-backup"
---
# Source: sonarr-anime/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr-anime
namespace: sonarr-anime
labels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
app.kubernetes.io/part-of: sonarr-anime
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr-anime
app.kubernetes.io/instance: sonarr-anime
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -1,928 +0,0 @@
---
# Source: sonarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: sonarr-nfs-storage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-nfs-storage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: sonarr/charts/sonarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-config
labels:
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
helm.sh/chart: sonarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: sonarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "20Gi"
storageClassName: "ceph-block"
---
# Source: sonarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-nfs-storage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-nfs-storage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
volumeName: sonarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: sonarr/charts/sonarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: sonarr
labels:
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
app.kubernetes.io/service: sonarr
helm.sh/chart: sonarr-4.4.0
namespace: sonarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8989
protocol: TCP
name: http
- port: 9794
targetPort: 9794
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
---
# Source: sonarr/charts/sonarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: sonarr
helm.sh/chart: sonarr-4.4.0
namespace: sonarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
image: ghcr.io/linuxserver/sonarr:4.0.16@sha256:60e5edcac39172294ad22d55d1b08c2c0a9fe658cad2f2c4d742ae017d7874de
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
- args:
- sonarr
env:
- name: URL
value: http://localhost
- name: CONFIG
value: /config/config.xml
- name: PORT
value: "9794"
- name: ENABLE_ADDITIONAL_METRICS
value: "false"
- name: ENABLE_UNKNOWN_QUEUE_ITEMS
value: "false"
image: ghcr.io/onedr0p/exportarr:v2.3.0
imagePullPolicy: IfNotPresent
name: metrics
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /config
name: config
readOnly: true
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-config
- name: media
persistentVolumeClaim:
claimName: sonarr-nfs-storage
---
# Source: sonarr/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: sonarr4-postgresql-17-cluster
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-postgresql-17-external-backup"
serverName: "sonarr4-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "sonarr4-postgresql-17-garage-local-backup"
serverName: "sonarr4-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-recovery"
serverName: sonarr4-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 1Gi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: sonarr4-postgresql-17-backup-1
externalClusters:
- name: sonarr4-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "sonarr4-postgresql-17-recovery"
serverName: sonarr4-postgresql-17-backup-1
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-config-backup-secret
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-config-backup-secret
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/sonarr4/sonarr4-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-postgresql-17-cluster-backup-secret
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: sonarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: sonarr-postgresql-17-cluster-backup-secret-garage
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: sonarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: http-route-sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- sonarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: sonarr
port: 80
weight: 100
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-external-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/sonarr4/sonarr4-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-garage-local-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: sonarr/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "sonarr4-postgresql-17-recovery"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/sonarr/sonarr4-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: sonarr-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: sonarr/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr4-postgresql-17-alert-rules
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/sonarr4-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 1
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="sonarr"} - cnpg_pg_replication_is_wal_receiver_up{namespace="sonarr"}) < 2
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
            summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="sonarr", persistentvolumeclaim=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="sonarr",pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "sonarr/sonarr4-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="sonarr", pod=~"sonarr4-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: sonarr
cnpg_cluster: sonarr4-postgresql-17-cluster
---
# Source: sonarr/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
groups:
- name: sonarr
rules:
- alert: ExportarrAbsent
annotations:
description: Sonarr Exportarr has disappeared from Prometheus
service discovery.
summary: Exportarr is down.
expr: |
absent(up{job=~".*sonarr.*"} == 1)
for: 5m
labels:
severity: critical
- alert: SonarrDown
annotations:
description: Sonarr service is down.
summary: Sonarr is down.
expr: |
sonarr_system_status{job=~".*sonarr.*"} == 0
for: 5m
labels:
severity: critical
---
# Source: sonarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: sonarr-config-backup-source
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr-config-backup-source
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
sourcePVC: sonarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: sonarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-postgresql-17-daily-backup-scheduled-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-external-backup"
---
# Source: sonarr/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "sonarr4-postgresql-17-live-backup-scheduled-backup"
namespace: sonarr
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: sonarr4-postgresql-17
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: sonarr4-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "sonarr4-postgresql-17-garage-local-backup"
---
# Source: sonarr/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sonarr
namespace: sonarr
labels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/part-of: sonarr
spec:
selector:
matchLabels:
app.kubernetes.io/name: sonarr
app.kubernetes.io/instance: sonarr
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -1,103 +0,0 @@
---
# Source: speedtest-exporter/charts/speedtest-exporter/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: speedtest-exporter
labels:
helm.sh/chart: speedtest-exporter-0.1.2
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
app.kubernetes.io/version: "v3.5.4"
app.kubernetes.io/managed-by: Helm
---
# Source: speedtest-exporter/charts/speedtest-exporter/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: speedtest-exporter
labels:
helm.sh/chart: speedtest-exporter-0.1.2
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
app.kubernetes.io/version: "v3.5.4"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9798
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
---
# Source: speedtest-exporter/charts/speedtest-exporter/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: speedtest-exporter
labels:
helm.sh/chart: speedtest-exporter-0.1.2
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
app.kubernetes.io/version: "v3.5.4"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
template:
metadata:
labels:
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
spec:
serviceAccountName: speedtest-exporter
securityContext:
{}
containers:
- name: speedtest-exporter
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
image: "ghcr.io/miguelndecarvalho/speedtest-exporter:v3.5.4"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 9798
protocol: TCP
resources:
{}
---
# Source: speedtest-exporter/charts/speedtest-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: speedtest-exporter
namespace: speedtest-exporter
labels:
helm.sh/chart: speedtest-exporter-0.1.2
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
app.kubernetes.io/version: "v3.5.4"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: speedtest-exporter
app.kubernetes.io/instance: speedtest-exporter
endpoints:
- port: http
interval: 180m
scrapeTimeout: 2m
namespaceSelector:
matchNames:
- speedtest-exporter

View File

@@ -1,284 +0,0 @@
---
# Source: stack/templates/application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: stack-cl01tl
namespace: argocd
labels:
app.kubernetes.io/name: stack-cl01tl
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
targetRevision: manifests
path: clusters/cl01tl/manifests/stack
destination:
name: in-cluster
namespace: argocd
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
selfHeal: false
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
- RespectIgnoreDifferences=true
---
# Source: stack/templates/application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cilium
namespace: argocd
labels:
app.kubernetes.io/name: cilium
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
targetRevision: manifests
path: clusters/cl01tl/manifests/cilium
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
ignoreDifferences:
- group: monitoring.coreos.com
kind: ServiceMonitor
jqPathExpressions:
- .spec.endpoints[]?.relabelings[]?.action
syncPolicy:
automated:
prune: true
selfHeal: false
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
- RespectIgnoreDifferences=true
---
# Source: stack/templates/application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: coredns
namespace: argocd
labels:
app.kubernetes.io/name: coredns
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
targetRevision: manifests
path: clusters/cl01tl/manifests/coredns
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
selfHeal: true
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
---
# Source: stack/templates/application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metrics-server
namespace: argocd
labels:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
targetRevision: manifests
path: clusters/cl01tl/manifests/metrics-server
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
selfHeal: true
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
---
# Source: stack/templates/application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prometheus-operator-crds
namespace: argocd
labels:
app.kubernetes.io/name: prometheus-operator-crds
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
targetRevision: manifests
path: clusters/cl01tl/manifests/prometheus-operator-crds
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
selfHeal: false
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
---
# Source: stack/templates/application-set.yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: application-set-cl01tl
namespace: argocd
labels:
app.kubernetes.io/name: application-set-cl01tl
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
spec:
syncPolicy:
applicationsSync: create-update
preserveResourcesOnDeletion: false
generators:
- git:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
revision: manifests
directories:
- path: clusters/cl01tl/manifests/*
- path: clusters/cl01tl/manifests/stack
exclude: true
- path: clusters/cl01tl/manifests/cilium
exclude: true
- path: clusters/cl01tl/manifests/coredns
exclude: true
- path: clusters/cl01tl/manifests/metrics-server
exclude: true
- path: clusters/cl01tl/manifests/prometheus-operator-crds
exclude: true
template:
metadata:
name: '{{path.basename}}'
spec:
project: default
source:
repoURL: http://gitea-http.gitea:3000/alexlebens/infrastructure
targetRevision: manifests
path: '{{path}}'
destination:
name: in-cluster
namespace: '{{path.basename}}'
revisionHistoryLimit: 3
ignoreDifferences:
- group: ""
kind: Service
jqPathExpressions:
- .spec.externalName
- group: "apps"
kind: "Deployment"
jsonPointers:
- /spec/template/metadata/annotations/checksum~1secret
- /spec/template/metadata/annotations/checksum~1secret-core
- /spec/template/metadata/annotations/checksum~1secret-jobservice
- /spec/template/metadata/annotations/checksum~1tls
- group: "apps"
kind: "StatefulSet"
jsonPointers:
- /spec/template/metadata/annotations/checksum~1secret
- /spec/template/metadata/annotations/checksum~1tls
- group: "apps"
kind: StatefulSet
jqPathExpressions:
- .spec.volumeClaimTemplates[]?.apiVersion
- .spec.volumeClaimTemplates[]?.kind
- .spec.volumeClaimTemplates[]?.metadata.creationTimestamp
- group: ""
kind: GpuDevicePlugin
jqPathExpressions:
- .metadata.annotations[]
syncPolicy:
automated:
prune: true
selfHeal: false
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace=true
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
- RespectIgnoreDifferences=true

View File

@@ -1,937 +0,0 @@
---
# Source: stalwart/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: stalwart
labels:
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: stalwart/charts/stalwart/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stalwart-config
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: stalwart
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: stalwart/charts/stalwart/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: stalwart
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
app.kubernetes.io/service: stalwart
helm.sh/chart: stalwart-4.4.0
namespace: stalwart
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
- port: 143
targetPort: 143
protocol: TCP
name: imap
- port: 993
targetPort: 993
protocol: TCP
name: imaps
- port: 25
targetPort: 25
protocol: TCP
name: smtp
- port: 465
targetPort: 465
protocol: TCP
name: smtps
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
---
# Source: stalwart/charts/stalwart/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: stalwart
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.4.0
namespace: stalwart
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: stalwartlabs/stalwart:v0.14.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /opt/stalwart
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: stalwart-config
---
# Source: stalwart/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: stalwart-postgresql-17-cluster
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "stalwart-postgresql-17-external-backup"
serverName: "stalwart-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "stalwart-postgresql-17-garage-local-backup"
serverName: "stalwart-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-17-recovery"
serverName: stalwart-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: stalwart-postgresql-17-backup-1
externalClusters:
- name: stalwart-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "stalwart-postgresql-17-recovery"
serverName: stalwart-postgresql-17-backup-1
---
# Source: stalwart/templates/elasticsearch.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: elasticsearch-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
version: 8.18.0
auth:
fileRealm:
- secretName: stalwart-elasticsearch-secret
nodeSets:
- name: default
count: 1
config:
node.store.allow_mmap: false
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: ceph-block
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-elasticsearch-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-elasticsearch-secret
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: username
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/stalwart/elasticsearch
metadataPolicy: None
property: username
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/stalwart/elasticsearch
metadataPolicy: None
property: password
- secretKey: roles
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/stalwart/elasticsearch
metadataPolicy: None
property: roles
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-config-backup-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-config-backup-secret
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: v0.11.8
app.kubernetes.io/component: backup
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/stalwart/stalwart-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-postgresql-17-cluster-backup-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: v0.11.8
app.kubernetes.io/component: database
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-postgresql-17-cluster-backup-secret-garage
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: stalwart/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: http-route-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- stalwart.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: stalwart
port: 80
weight: 100
---
# Source: stalwart/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-17-external-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/stalwart/stalwart-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: stalwart-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: stalwart/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-17-garage-local-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/stalwart/stalwart-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: stalwart/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-17-recovery"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/stalwart/stalwart-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: stalwart/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: stalwart-postgresql-17-alert-rules
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/stalwart-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="stalwart"} - cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}) < 1
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
            summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
              need some time to catch up with the cluster primary instance.
              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="stalwart"} - cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}) < 2
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
            summary: CNPG Instance is critically close to the maximum number of connections!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
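          # Connection usage here is backends per instance divided by max_connections, as a percentage; critical above 95%.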
expr: |
sum by (pod) (cnpg_backends_total{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="stalwart",pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
            summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
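          # Fires when the data PVC, the WAL PVC, or any tablespace PVC is more than 90% full (the three OR-ed terms below).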
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="stalwart",pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
            summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
            summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
---
# Source: stalwart/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: redis-replication-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: stalwart/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: stalwart-config-backup-source
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-config-backup-source
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
sourcePVC: stalwart-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: stalwart-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: stalwart/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "stalwart-postgresql-17-daily-backup-scheduled-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
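  # CloudNativePG uses a six-field cron spec (seconds first), so this runs once a day at 00:00:00.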
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: stalwart-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-17-external-backup"
---
# Source: stalwart/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "stalwart-postgresql-17-live-backup-scheduled-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: stalwart-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-17-garage-local-backup"
---
# Source: stalwart/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: redis-replication-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
redis-operator: "true"
env: production
spec:
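  # Matches the redis_setup_type label applied by the redis operator to its replication services, exposing the redis-exporter port for scraping.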
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -1,476 +0,0 @@
---
# Source: talos/templates/service-account.yaml
apiVersion: talos.dev/v1alpha1
kind: ServiceAccount
metadata:
name: talos-backup-secrets
namespace: talos
labels:
app.kubernetes.io/name: talos-backup-secrets
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
spec:
roles:
- os:etcd:backup
---
# Source: talos/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: talos-backup-secrets
namespace: talos
labels:
app.kubernetes.io/name: talos-backup-secrets
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
annotations:
kubernetes.io/service-account.name: talos-backup-secrets
---
# Source: talos/charts/etcd-backup/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: talos
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-backup-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "0 2 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- command:
- /talos-backup
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: talos-etcd-backup-secret
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: talos-etcd-backup-secret
- name: AWS_REGION
value: nyc3
- name: CUSTOM_S3_ENDPOINT
value: https://nyc3.digitaloceanspaces.com
- name: BUCKET
value: talos-backups-bee8585f7b8a4d0239c9b823
- name: S3_PREFIX
value: cl01tl/etcd
- name: CLUSTER_NAME
value: cl01tl
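            # talos-backup encrypts each etcd snapshot with age using this public key before uploading it to S3.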
- name: AGE_X25519_PUBLIC_KEY
valueFrom:
secretKeyRef:
key: AGE_X25519_PUBLIC_KEY
name: talos-etcd-backup-secret
- name: USE_PATH_STYLE
value: "false"
image: ghcr.io/siderolabs/talos-backup:v0.1.0-beta.3@sha256:05c86663b251a407551dc948097e32e163a345818117eb52c573b0447bd0c7a7
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /var/run/secrets/talos.dev
mountPropagation: None
name: secret
readOnly: true
- mountPath: /.talos
name: talos
- mountPath: /tmp
name: tmp
workingDir: /tmp
- args:
- -ec
- |
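              # Retention window: 1209600 seconds = 14 days; the ISO-8601 timestamp in each object name sorts
              # lexically, so the awk string comparison below selects everything older than the cutoff.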
export DATE_RANGE=$(date -d @$(( $(date +%s) - 1209600 )) +%Y-%m-%dT%H:%M:%SZ);
export FILE_MATCH="$BUCKET/cl01tl/etcd/cl01tl-$DATE_RANGE.snap.age"
echo ">> Running S3 prune for Talos backup repository"
echo ">> Backups prior to '$DATE_RANGE' will be removed"
echo ">> Backups to be removed:"
s3cmd ls ${BUCKET}/cl01tl/etcd/ |
awk -v file_match="$FILE_MATCH" '$4 < file_match {print $4}'
echo ">> Deleting ..."
s3cmd ls ${BUCKET}/cl01tl/etcd/ |
awk -v file_match="$FILE_MATCH" '$4 < file_match {print $4}' |
while read file; do
s3cmd del "$file";
done;
echo ">> Completed S3 prune for Talos backup repository"
command:
- /bin/sh
env:
- name: BUCKET
valueFrom:
secretKeyRef:
key: BUCKET
name: talos-etcd-backup-secret
image: d3fk/s3cmd:latest@sha256:590c42746db1252be8aad33e287c7910698c32b58b4fc34f67592a5bd0841551
imagePullPolicy: IfNotPresent
name: s3-prune
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /root/.s3cfg
mountPropagation: None
name: s3cmd-config
readOnly: true
subPath: .s3cfg
volumes:
- name: s3cmd-config
secret:
secretName: talos-etcd-backup-secret
- name: secret
secret:
secretName: talos-backup-secrets
- emptyDir:
medium: Memory
name: talos
- emptyDir:
medium: Memory
name: tmp
---
# Source: talos/charts/etcd-defrag/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: etcd-defrag-defrag-2
labels:
app.kubernetes.io/controller: defrag-2
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-defrag-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "10 0 * * 0"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: defrag-2
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- etcd
- defrag
- -n
- 10.232.1.12
env:
- name: TALOSCONFIG
value: /tmp/.talos/config
image: ghcr.io/siderolabs/talosctl:v1.11.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /tmp/.talos/config
mountPropagation: None
name: talos-config-2
readOnly: true
subPath: config
volumes:
- name: talos-config-2
secret:
secretName: talos-etcd-defrag-secret
---
# Source: talos/charts/etcd-defrag/templates/common.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: etcd-defrag-defrag-3
labels:
app.kubernetes.io/controller: defrag-3
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-defrag-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "20 0 * * 0"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: defrag-3
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- etcd
- defrag
- -n
- 10.232.1.13
env:
- name: TALOSCONFIG
value: /tmp/.talos/config
image: ghcr.io/siderolabs/talosctl:v1.11.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /tmp/.talos/config
mountPropagation: None
name: talos-config-3
readOnly: true
subPath: config
volumes:
- name: talos-config-3
secret:
secretName: talos-etcd-defrag-secret
---
# Source: talos/charts/etcd-defrag/templates/common.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: etcd-defrag-defrag-1
labels:
app.kubernetes.io/controller: defrag-1
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-defrag-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "0 0 * * 0"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: defrag-1
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- etcd
- defrag
- -n
- 10.232.1.11
env:
- name: TALOSCONFIG
value: /tmp/.talos/config
image: ghcr.io/siderolabs/talosctl:v1.11.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /tmp/.talos/config
mountPropagation: None
name: talos-config-1
readOnly: true
subPath: config
volumes:
- name: talos-config-1
secret:
secretName: talos-etcd-defrag-secret
---
# Source: talos/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: talos-etcd-backup-secret
namespace: talos
labels:
app.kubernetes.io/name: talos-etcd-backup-secret
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
annotations:
kubernetes.io/service-account.name: talos-backup-secrets
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
- secretKey: .s3cfg
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: s3cfg
- secretKey: BUCKET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: BUCKET
- secretKey: AGE_X25519_PUBLIC_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/talos/etcd-backup
metadataPolicy: None
property: AGE_X25519_PUBLIC_KEY
---
# Source: talos/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: talos-etcd-defrag-secret
namespace: talos
labels:
app.kubernetes.io/name: talos-etcd-defrag-secret
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: config
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/talos/etcd-defrag
metadataPolicy: None
property: config

View File

@@ -1,331 +0,0 @@
---
# Source: tautulli/charts/tautulli/templates/common.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: tautulli
labels:
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
helm.sh/chart: tautulli-4.4.0
namespace: tautulli
data:
select_tmdb_poster.py: |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description: Selects the default TMDB poster if no poster is selected
or the current poster is from Gracenote.
Author: /u/SwiftPanda16
Requires: plexapi
Usage:
* Change the posters for an entire library:
python select_tmdb_poster.py --library "Movies"
* Change the poster for a specific item:
python select_tmdb_poster.py --rating_key 1234
* By default locked posters are skipped. To update locked posters:
python select_tmdb_poster.py --library "Movies" --include_locked
Tautulli script trigger:
* Notify on recently added
Tautulli script conditions:
* Filter which media to select the poster. Examples:
[ Media Type | is | movie ]
Tautulli script arguments:
* Recently Added:
--rating_key {rating_key}
'''
import argparse
import os
import plexapi.base
from plexapi.server import PlexServer
plexapi.base.USER_DONT_RELOAD_FOR_KEYS.add('fields')
# Environmental Variables
PLEX_URL = os.getenv('PLEX_URL')
PLEX_TOKEN = os.getenv('PLEX_TOKEN')
def select_tmdb_poster_library(library, include_locked=False):
for item in library.all(includeGuids=False):
# Only reload for fields
item.reload(**{k: 0 for k, v in item._INCLUDES.items()})
select_tmdb_poster_item(item, include_locked=include_locked)
def select_tmdb_poster_item(item, include_locked=False):
if item.isLocked('thumb') and not include_locked: # PlexAPI 4.5.10
print(f"Locked poster for {item.title}. Skipping.")
return
posters = item.posters()
selected_poster = next((p for p in posters if p.selected), None)
if selected_poster is None:
print(f"WARNING: No poster selected for {item.title}.")
else:
skipping = ' Skipping.' if selected_poster.provider != 'gracenote' else ''
print(f"Poster provider is '{selected_poster.provider}' for {item.title}.{skipping}")
if posters and (selected_poster is None or selected_poster.provider == 'gracenote'):
# Fallback to first poster if no TMDB posters are available
tmdb_poster = next((p for p in posters if p.provider == 'tmdb'), posters[0])
# Selecting the poster automatically locks it
tmdb_poster.select()
print(f"Selected {tmdb_poster.provider} poster for {item.title}.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--rating_key', type=int)
parser.add_argument('--library')
parser.add_argument('--include_locked', action='store_true')
opts = parser.parse_args()
plex = PlexServer(PLEX_URL, PLEX_TOKEN)
if opts.rating_key:
item = plex.fetchItem(opts.rating_key)
select_tmdb_poster_item(item, opts.include_locked)
elif opts.library:
library = plex.library.section(opts.library)
select_tmdb_poster_library(library, opts.include_locked)
else:
print("No --rating_key or --library specified. Exiting.")
---
# Source: tautulli/charts/tautulli/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tautulli-config
labels:
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
helm.sh/chart: tautulli-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tautulli
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: tautulli/charts/tautulli/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tautulli
labels:
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
app.kubernetes.io/service: tautulli
helm.sh/chart: tautulli-4.4.0
namespace: tautulli
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8181
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tautulli
app.kubernetes.io/name: tautulli
---
# Source: tautulli/charts/tautulli/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tautulli
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
helm.sh/chart: tautulli-4.4.0
annotations:
reloader.stakater.com/auto: "true"
namespace: tautulli
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: tautulli
app.kubernetes.io/instance: tautulli
template:
metadata:
annotations:
checksum/configMaps: 8f779aaa6f9bccc9e07f526b05d4f9d81e7e55a443819d526312ff297ac88ba5
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tautulli
app.kubernetes.io/name: tautulli
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: PUID
value: "1001"
            - name: PGID
value: "1001"
- name: TZ
value: US/Central
image: ghcr.io/tautulli/tautulli:v2.16.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /config/scripts/select_tmdb_poster.py
mountPropagation: None
name: scripts
readOnly: true
subPath: select_tmdb_poster.py
volumes:
- name: config
persistentVolumeClaim:
claimName: tautulli-config
- configMap:
name: tautulli-scripts
name: scripts
---
# Source: tautulli/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tautulli-config-backup-secret
namespace: tautulli
labels:
app.kubernetes.io/name: tautulli-config-backup-secret
app.kubernetes.io/instance: tautulli
app.kubernetes.io/part-of: tautulli
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tautulli/tautulli-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tautulli/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tautulli
namespace: tautulli
labels:
app.kubernetes.io/name: http-route-tautulli
app.kubernetes.io/instance: tautulli
app.kubernetes.io/part-of: tautulli
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tautulli.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tautulli
port: 80
weight: 100
---
# Source: tautulli/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tautulli-config-backup-source
namespace: tautulli
labels:
app.kubernetes.io/name: tautulli-config-backup-source
app.kubernetes.io/instance: tautulli
app.kubernetes.io/part-of: tautulli
spec:
sourcePVC: tautulli-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tautulli-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -1,658 +0,0 @@
---
# Source: tdarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: tdarr-nfs-storage
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-nfs-storage
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: tdarr/charts/tdarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tdarr-config
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tdarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "ceph-block"
---
# Source: tdarr/charts/tdarr/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tdarr-server
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tdarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "ceph-block"
---
# Source: tdarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tdarr-nfs-storage
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-nfs-storage
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
volumeName: tdarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: tdarr/charts/tdarr-exporter/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-tdarr-exporter
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9090
targetPort: 9090
protocol: TCP
name: metrics
selector:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-api
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-api
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8266
targetPort: 8266
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-web
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-web
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8265
targetPort: 8265
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: tdarr-node
labels:
app.kubernetes.io/controller: node
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: node
app.kubernetes.io/name: tdarr
app.kubernetes.io/instance: tdarr
template:
metadata:
annotations:
labels:
app.kubernetes.io/controller: node
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
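      # Schedule node workers only on hosts advertising an Intel GPU; each pod claims one i915 device, presumably for hardware transcodes.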
nodeSelector:
intel.feature.node.kubernetes.io/gpu: "true"
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1001"
- name: PGID
value: "1001"
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: inContainer
value: "true"
- name: nodeName
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: serverIP
value: tdarr-api
- name: serverPort
value: "8266"
image: ghcr.io/haveagitgat/tdarr_node:2.58.02
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 10m
gpu.intel.com/i915: 1
memory: 512Mi
volumeMounts:
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /tcache
name: node-cache
volumes:
- name: media
persistentVolumeClaim:
claimName: tdarr-nfs-storage
- emptyDir: {}
name: node-cache
---
# Source: tdarr/charts/tdarr-exporter/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tdarr-tdarr-exporter
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
template:
metadata:
annotations:
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: default
securityContext:
{}
containers:
- name: tdarr-exporter
securityContext:
{}
image: "docker.io/homeylab/tdarr-exporter:1.4.2"
imagePullPolicy: IfNotPresent
ports:
- name: metrics
containerPort: 9090
protocol: TCP
env:
- name: TDARR_URL
value: "http://tdarr-web.tdarr:8265"
- name: VERIFY_SSL
value: "false"
- name: LOG_LEVEL
value: "info"
- name: PROMETHEUS_PORT
value: "9090"
- name: PROMETHEUS_PATH
value: "/metrics"
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
readinessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
startupProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 2
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
resources:
requests:
cpu: 10m
memory: 256Mi
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tdarr-server
labels:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: server
app.kubernetes.io/name: tdarr
app.kubernetes.io/instance: tdarr
template:
metadata:
labels:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1001"
- name: PGID
value: "1001"
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: internalNode
value: "false"
- name: inContainer
value: "true"
- name: nodeName
value: tdarr-server
- name: serverIP
value: 0.0.0.0
- name: serverPort
value: "8266"
- name: webUIPort
value: "8265"
image: ghcr.io/haveagitgat/tdarr:2.58.02
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 200m
memory: 1Gi
volumeMounts:
- mountPath: /app/configs
name: config
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /app/server
name: server
- mountPath: /tcache
name: server-cache
volumes:
- name: config
persistentVolumeClaim:
claimName: tdarr-config
- name: media
persistentVolumeClaim:
claimName: tdarr-nfs-storage
- name: server
persistentVolumeClaim:
claimName: tdarr-server
- emptyDir: {}
name: server-cache
---
# Source: tdarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tdarr-config-backup-secret
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-config-backup-secret
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tdarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tdarr-server-backup-secret
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-server-backup-secret
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-server"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tdarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tdarr
namespace: tdarr
labels:
app.kubernetes.io/name: http-route-tdarr
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tdarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tdarr-web
port: 8265
weight: 100
---
# Source: tdarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tdarr-config-backup-source
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-config-backup-source
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
sourcePVC: tdarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tdarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tdarr-server-backup-source
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-server-backup-source
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
sourcePVC: tdarr-server
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tdarr-server-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/charts/tdarr-exporter/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
name: tdarr-tdarr-exporter
spec:
endpoints:
- interval: 1m
path: /metrics
port: metrics
scrapeTimeout: 15s
namespaceSelector:
matchNames:
- tdarr
selector:
matchLabels:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr-exporter/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "tdarr-tdarr-exporter-test-connection"
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: "docker.io/busybox:1.36.1"
command: ['wget']
args: ['tdarr-tdarr-exporter:9090/healthz']
restartPolicy: Never

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,441 +0,0 @@
---
# Source: tubearchivist/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: tubearchivist/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: tubearchivist-nfs-storage
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-nfs-storage
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/YouTube
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tubearchivist
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "40Gi"
storageClassName: "ceph-block"
---
# Source: tubearchivist/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tubearchivist-nfs-storage
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-nfs-storage
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
volumeName: tubearchivist-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/service: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
namespace: tubearchivist
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 24000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
---
# Source: tubearchivist/charts/tubearchivist/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tubearchivist
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.4.0
namespace: tubearchivist
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
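        # gluetun sidecar: routes the pod through ProtonVPN over WireGuard, while FIREWALL_INPUT_PORTS keeps the web and API ports (80, 8000, 24000) reachable from the cluster.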
- env:
- name: VPN_SERVICE_PROVIDER
value: protonvpn
- name: VPN_TYPE
value: wireguard
- name: WIREGUARD_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: private-key
name: tubearchivist-wireguard-conf
- name: VPN_PORT_FORWARDING
value: "on"
- name: PORT_FORWARD_ONLY
value: "on"
- name: FIREWALL_OUTBOUND_SUBNETS
value: 10.0.0.0/8
- name: FIREWALL_INPUT_PORTS
value: 80,8000,24000
- name: DOT
value: "false"
- name: DNS_KEEP_NAMESERVER
value: "true"
- name: DNS_PLAINTEXT_ADDRESS
value: 10.96.0.10
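            # DNS_PLAINTEXT_ADDRESS is assumed to be the cluster DNS service (kube-dns at 10.96.0.10) so in-cluster names keep resolving inside the tunnel.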
image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
imagePullPolicy: IfNotPresent
name: gluetun
resources:
limits:
devic.es/tun: "1"
requests:
cpu: 10m
devic.es/tun: "1"
memory: 128Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
- env:
- name: TZ
value: US/Central
- name: HOST_UID
value: "1000"
- name: HOST_GID
value: "1000"
- name: ES_URL
value: https://elasticsearch-tubearchivist-es-http.tubearchivist:9200
- name: ES_DISABLE_VERIFY_SSL
value: "true"
- name: REDIS_CON
value: redis://redis-replication-tubearchivist-master.tubearchivist:6379
- name: TA_HOST
value: https://tubearchivist.alexlebens.net http://tubearchivist.tubearchivist:80/
- name: TA_PORT
value: "24000"
- name: TA_USERNAME
value: admin
envFrom:
- secretRef:
name: tubearchivist-config-secret
image: bbilly1/tubearchivist:v0.5.8
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 1Gi
volumeMounts:
- mountPath: /cache
name: data
- mountPath: /youtube
name: youtube
volumes:
- name: data
persistentVolumeClaim:
claimName: tubearchivist
- name: youtube
persistentVolumeClaim:
claimName: tubearchivist-nfs-storage
---
# Source: tubearchivist/templates/elasticsearch.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: elasticsearch-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
version: 8.18.0
auth:
fileRealm:
- secretName: tubearchivist-elasticsearch-secret
nodeSets:
- name: default
count: 1
config:
node.store.allow_mmap: false
path.repo: /usr/share/elasticsearch/data/snapshot
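        # path.repo whitelists this directory for snapshot repositories; it is backed by the NFS volume mounted in the pod template below.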
podTemplate:
spec:
volumes:
- name: tubearchivist-snapshot-nfs-storage
nfs:
path: /volume2/Storage/TubeArchivist
server: synologybond.alexlebens.net
containers:
- name: elasticsearch
volumeMounts:
- name: tubearchivist-snapshot-nfs-storage
mountPath: /usr/share/elasticsearch/data/snapshot
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: ceph-block
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-config-secret
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-config-secret
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ELASTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/env
metadataPolicy: None
property: ELASTIC_PASSWORD
- secretKey: TA_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/env
metadataPolicy: None
property: TA_PASSWORD
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-elasticsearch-secret
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-elasticsearch-secret
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: username
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: username
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: password
- secretKey: roles
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/tubearchivist/elasticsearch
metadataPolicy: None
property: roles
---
# Source: tubearchivist/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tubearchivist-wireguard-conf
namespace: tubearchivist
labels:
app.kubernetes.io/name: tubearchivist-wireguard-conf
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /protonvpn/conf/cl01tl
metadataPolicy: None
property: private-key
---
# Source: tubearchivist/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: http-route-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tubearchivist.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tubearchivist
port: 80
weight: 100
---
# Source: tubearchivist/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: redis-replication-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: tubearchivist/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-tubearchivist
namespace: tubearchivist
labels:
app.kubernetes.io/name: redis-replication-tubearchivist
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -1,209 +0,0 @@
---
# Source: unpackerr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: unpackerr-nfs-storage
namespace: unpackerr
labels:
app.kubernetes.io/name: unpackerr-nfs-storage
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/part-of: unpackerr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: unpackerr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unpackerr-nfs-storage
namespace: unpackerr
labels:
app.kubernetes.io/name: unpackerr-nfs-storage
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/part-of: unpackerr
spec:
volumeName: unpackerr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
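# Binding note: `volumeName` pins this claim to the statically provisioned
# unpackerr-nfs-storage PersistentVolume above (storageClassName and access modes must
# match for the bind to succeed), so no dynamic NFS provisioning is involved.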
---
# Source: unpackerr/charts/unpackerr/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: unpackerr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: unpackerr
helm.sh/chart: unpackerr-4.4.0
namespace: unpackerr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: unpackerr
app.kubernetes.io/instance: unpackerr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/name: unpackerr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: UN_WEBSERVER_METRICS
value: "true"
- name: UN_SONARR_0_URL
value: http://sonarr.sonarr:80
- name: UN_SONARR_0_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_SONARR_1_URL
value: http://sonarr-4k.sonarr-4k:80
- name: UN_SONARR_1_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_SONARR_2_URL
value: http://sonarr-anime.sonarr-anime:80
- name: UN_SONARR_2_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_0_URL
value: http://radarr.radarr:80
- name: UN_RADARR_0_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_1_URL
value: http://radarr-4k.radarr-4k:80
- name: UN_RADARR_1_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_2_URL
value: http://radarr-anime.radarr-anime:80
- name: UN_RADARR_2_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_3_URL
value: http://radarr-standup.radarr-standup:80
- name: UN_RADARR_3_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_LIDARR_0_URL
value: http://lidarr.lidarr:80
- name: UN_LIDARR_0_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
envFrom:
- secretRef:
name: unpackerr-key-secret
image: golift/unpackerr:0.14.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /mnt/store
name: storage
volumes:
- name: storage
persistentVolumeClaim:
claimName: unpackerr-nfs-storage
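# Config note: unpackerr is configured entirely through UN_* environment variables; each
# indexed UN_<APP>_<N>_URL above pairs with a matching UN_<APP>_<N>_API_KEY injected from
# unpackerr-key-secret via envFrom (see the ExternalSecret below), and every instance
# watches the same completed-torrent path on the shared NFS mount.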
---
# Source: unpackerr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: unpackerr-key-secret
namespace: unpackerr
labels:
app.kubernetes.io/name: unpackerr-key-secret
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/part-of: unpackerr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: UN_SONARR_0_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/sonarr4/key
metadataPolicy: None
property: key
- secretKey: UN_SONARR_1_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/sonarr4-4k/key
metadataPolicy: None
property: key
- secretKey: UN_SONARR_2_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/sonarr4-anime/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_0_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_1_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5-4k/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_2_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5-anime/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_3_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5-standup/key
metadataPolicy: None
property: key
- secretKey: UN_LIDARR_0_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/lidarr2/key
metadataPolicy: None
property: key

@@ -1,151 +0,0 @@
---
# Source: unpoller/charts/unpoller/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: unpoller
labels:
app.kubernetes.io/instance: unpoller
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: unpoller
app.kubernetes.io/service: unpoller
helm.sh/chart: unpoller-4.4.0
namespace: unpoller
spec:
type: ClusterIP
ports:
- port: 9130
targetPort: 9130
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpoller
app.kubernetes.io/name: unpoller
---
# Source: unpoller/charts/unpoller/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: unpoller
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpoller
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: unpoller
helm.sh/chart: unpoller-4.4.0
namespace: unpoller
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: unpoller
app.kubernetes.io/instance: unpoller
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpoller
app.kubernetes.io/name: unpoller
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: UP_UNIFI_CONTROLLER_0_SAVE_ALARMS
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_ANOMALIES
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_DPI
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_EVENTS
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_IDS
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_SITES
value: "true"
- name: UP_UNIFI_CONTROLLER_0_URL
value: https://unifi.alexlebens.net/
- name: UP_UNIFI_CONTROLLER_0_VERIFY_SSL
value: "false"
- name: UP_INFLUXDB_DISABLE
value: "true"
- name: UP_PROMETHEUS_HTTP_LISTEN
value: 0.0.0.0:9130
- name: UP_PROMETHEUS_NAMESPACE
value: unpoller
- name: UP_POLLER_DEBUG
value: "false"
- name: UP_POLLER_QUIET
value: "false"
envFrom:
- secretRef:
name: unpoller-unifi-secret
image: ghcr.io/unpoller/unpoller:v2.16.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
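# Config note: unpoller is driven by UP_* environment variables; controller 0 points at
# https://unifi.alexlebens.net/ with InfluxDB output disabled, the Prometheus exporter
# listens on 0.0.0.0:9130 to match the Service's `metrics` port, and the controller
# credentials (UP_UNIFI_CONTROLLER_0_USER/_PASS) come from unpoller-unifi-secret via
# envFrom (see the ExternalSecret below).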
---
# Source: unpoller/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: unpoller-unifi-secret
namespace: unpoller
labels:
app.kubernetes.io/name: unpoller-unifi-secret
app.kubernetes.io/instance: unpoller
app.kubernetes.io/part-of: unpoller
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: UP_UNIFI_CONTROLLER_0_USER
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth/cl01tl
metadataPolicy: None
property: user
- secretKey: UP_UNIFI_CONTROLLER_0_PASS
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth/cl01tl
metadataPolicy: None
property: password
---
# Source: unpoller/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: unpoller
namespace: unpoller
labels:
app.kubernetes.io/name: unpoller
app.kubernetes.io/instance: unpoller
app.kubernetes.io/part-of: unpoller
spec:
selector:
matchLabels:
app.kubernetes.io/name: unpoller
app.kubernetes.io/instance: unpoller
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics

Some files were not shown because too many files have changed in this diff.