Automated Manifest Update (#2172)

This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow.

Reviewed-on: #2172
Co-authored-by: gitea-bot <gitea-bot@alexlebens.net>
Co-committed-by: gitea-bot <gitea-bot@alexlebens.net>
Committed by Alex Lebens on 2025-12-02 01:40:19 +00:00
commit f39fda9f14, parent 15b191f7e4
17 changed files with 9000 additions and 0 deletions
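
The files that follow are Helm chart renders (note the `# Source:` comments and `helm.sh/chart` labels) committed back to the repository by the workflow. The exact CI job is not part of this diff, so the step below is only a hedged sketch of how such a render could be produced; the workflow syntax, chart paths, application list, and output locations are assumptions rather than details taken from this repository.

# Hypothetical render step (illustrative only; the real workflow, paths, and flags may differ)
- name: Render manifests
  run: |
    for app in ephemera jellystat lidatube listenarr omni-tools outline; do
      helm template "$app" "./charts/$app" \
        --namespace "$app" \
        --values "./charts/$app/values.yaml" \
        > "./manifests/$app.yaml"
    done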

@@ -0,0 +1,360 @@
---
# Source: ephemera/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: ephemera-import-nfs-storage
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-import-nfs-storage
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Books Import
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: ephemera/charts/ephemera/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ephemera
labels:
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
helm.sh/chart: ephemera-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: ephemera
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: ephemera/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ephemera-import-nfs-storage
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-import-nfs-storage
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
volumeName: ephemera-import-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: ephemera/charts/ephemera/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: ephemera
labels:
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
app.kubernetes.io/service: ephemera
helm.sh/chart: ephemera-4.4.0
namespace: ephemera
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8286
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/name: ephemera
---
# Source: ephemera/charts/ephemera/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ephemera
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ephemera
helm.sh/chart: ephemera-4.4.0
namespace: ephemera
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: ephemera
app.kubernetes.io/instance: ephemera
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ephemera
app.kubernetes.io/name: ephemera
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: APPRISE_STORAGE_MODE
value: memory
- name: APPRISE_STATEFUL_MODE
value: disabled
- name: APPRISE_WORKER_COUNT
value: "1"
- name: APPRISE_STATELESS_URLS
valueFrom:
secretKeyRef:
key: ntfy-url
name: ephemera-apprise-config
image: caronc/apprise:1.2.6
imagePullPolicy: IfNotPresent
name: apprise-api
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: LOG_LEVEL
value: info
- name: LOG_HTML
value: "false"
- name: CAPTCHA_SOLVER
value: none
- name: TZ
value: America/Chicago
image: ghcr.io/flaresolverr/flaresolverr:v3.4.5
imagePullPolicy: IfNotPresent
name: flaresolverr
resources:
requests:
cpu: 10m
memory: 128Mi
- env:
- name: AA_BASE_URL
value: https://annas-archive.org
- name: FLARESOLVERR_URL
value: http://127.0.0.1:8191
- name: LG_BASE_URL
value: https://gen.com
- name: PUID
value: "0"
- name: PGID
value: "0"
image: ghcr.io/orwellianepilogue/ephemera:1.3.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /app/downloads
name: cache
- mountPath: /app/data
name: config
- mountPath: /app/ingest
name: ingest
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: ephemera
- name: ingest
persistentVolumeClaim:
claimName: ephemera-import-nfs-storage
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-key-secret
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-key-secret
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/ephemera/config
metadataPolicy: None
property: key
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-apprise-config
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-apprise-config
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/ephemera/config
metadataPolicy: None
property: ntfy-url
---
# Source: ephemera/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-config-backup-secret
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-config-backup-secret
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/ephemera/ephemera-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
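# Note on the target template above (comment only, not part of the chart-rendered output):
# with engineVersion v2 and mergePolicy Merge, the External Secrets Operator keeps the five
# synced keys (BUCKET_ENDPOINT, RESTIC_PASSWORD, AWS_DEFAULT_REGION, AWS_ACCESS_KEY_ID,
# AWS_SECRET_ACCESS_KEY) in the resulting Secret and adds the templated key, which should
# render roughly as
#   RESTIC_REPOSITORY: "<value of BUCKET_ENDPOINT>/ephemera/ephemera-config"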
---
# Source: ephemera/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-ephemera
namespace: ephemera
labels:
app.kubernetes.io/name: http-route-ephemera
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ephemera.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: ephemera
port: 80
weight: 100
---
# Source: ephemera/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: ephemera-config-backup-source
namespace: ephemera
labels:
app.kubernetes.io/name: ephemera-config-backup-source
app.kubernetes.io/instance: ephemera
app.kubernetes.io/part-of: ephemera
spec:
sourcePVC: ephemera-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: ephemera-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
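# The trigger above is a standard five-field cron expression, so volsync starts the restic
# backup of the ephemera-config PVC daily at 04:00; the retain block keeps 1 hourly, 3 daily,
# 2 weekly, 2 monthly, and 4 yearly snapshots, with repository pruning every 7 days
# (descriptive comment only; not part of the rendered output).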

File diff suppressed because it is too large.


@@ -0,0 +1,861 @@
---
# Source: jellystat/charts/jellystat/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellystat-data
labels:
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
helm.sh/chart: jellystat-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: jellystat
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: jellystat/charts/jellystat/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: jellystat
labels:
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
app.kubernetes.io/service: jellystat
helm.sh/chart: jellystat-4.4.0
namespace: jellystat
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/name: jellystat
---
# Source: jellystat/charts/jellystat/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellystat
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellystat
helm.sh/chart: jellystat-4.4.0
namespace: jellystat
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: jellystat
app.kubernetes.io/instance: jellystat
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: jellystat
app.kubernetes.io/name: jellystat
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: JWT_SECRET
valueFrom:
secretKeyRef:
key: secret-key
name: jellystat-secret
- name: JS_USER
valueFrom:
secretKeyRef:
key: user
name: jellystat-secret
- name: JS_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: jellystat-secret
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
key: username
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
key: dbname
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_IP
valueFrom:
secretKeyRef:
key: host
name: jellystat-postgresql-17-cluster-app
- name: POSTGRES_PORT
valueFrom:
secretKeyRef:
key: port
name: jellystat-postgresql-17-cluster-app
image: cyfershepard/jellystat:1.1.6
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /app/backend/backup-data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: jellystat-data
---
# Source: jellystat/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: jellystat-postgresql-17-cluster
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "jellystat-postgresql-17-external-backup"
serverName: "jellystat-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "jellystat-postgresql-17-garage-local-backup"
serverName: "jellystat-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-recovery"
serverName: jellystat-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: jellystat-postgresql-17-backup-1
externalClusters:
- name: jellystat-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "jellystat-postgresql-17-recovery"
serverName: jellystat-postgresql-17-backup-1
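# How the pieces above fit together (descriptive comment only, not part of the rendered
# output): the cluster bootstraps by restoring from the "jellystat-postgresql-17-recovery"
# ObjectStore defined further down (Garage endpoint), archives WAL to the
# "jellystat-postgresql-17-garage-local-backup" store (isWALArchiver: true), and uses
# "jellystat-postgresql-17-external-backup" (DigitalOcean Spaces) for base backups only.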
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: secret-key
- secretKey: user
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: user
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/jellystat/auth
metadataPolicy: None
property: password
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-data-backup-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-data-backup-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellystat/jellystat-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-postgresql-17-cluster-backup-secret
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: jellystat/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: jellystat-postgresql-17-cluster-backup-secret-garage
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: jellystat/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-jellystat
namespace: jellystat
labels:
app.kubernetes.io/name: http-route-jellystat
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- jellystat.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: jellystat
port: 80
weight: 100
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-external-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-garage-local-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "jellystat-postgresql-17-recovery"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: jellystat-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: jellystat/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: jellystat-postgresql-17-alert-rules
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/jellystat-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 1
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 2
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance is critically close to its maximum number of connections!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster has high replication lag.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: jellystat
cnpg_cluster: jellystat-postgresql-17-cluster
---
# Source: jellystat/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: jellystat-data-backup-source
namespace: jellystat
labels:
app.kubernetes.io/name: jellystat-data-backup-source
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
spec:
sourcePVC: jellystat-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: jellystat-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "jellystat-postgresql-17-daily-backup-scheduled-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: jellystat-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-external-backup"
---
# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "jellystat-postgresql-17-live-backup-scheduled-backup"
namespace: jellystat
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: jellystat-postgresql-17
app.kubernetes.io/instance: jellystat
app.kubernetes.io/part-of: jellystat
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: jellystat-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "jellystat-postgresql-17-garage-local-backup"


@@ -0,0 +1,221 @@
---
# Source: lidatube/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: lidatube-nfs-storage
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-nfs-storage
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Music
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: lidatube/charts/lidatube/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lidatube-config
labels:
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
helm.sh/chart: lidatube-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: lidatube
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: lidatube/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: lidatube-nfs-storage
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-nfs-storage
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
volumeName: lidatube-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: lidatube/charts/lidatube/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: lidatube
labels:
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
app.kubernetes.io/service: lidatube
helm.sh/chart: lidatube-4.4.0
namespace: lidatube
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/name: lidatube
---
# Source: lidatube/charts/lidatube/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lidatube
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: lidatube
helm.sh/chart: lidatube-4.4.0
namespace: lidatube
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: lidatube
app.kubernetes.io/instance: lidatube
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: lidatube
app.kubernetes.io/name: lidatube
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: lidarr_address
value: http://lidarr.lidarr:80
- name: lidarr_api_key
valueFrom:
secretKeyRef:
key: lidarr_api_key
name: lidatube-secret
- name: sleep_interval
value: "360"
- name: sync_schedule
value: "4"
- name: attempt_lidarr_import
value: "true"
image: thewicklowwolf/lidatube:0.2.41
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /lidatube/config
name: config
- mountPath: /lidatube/downloads
name: music
volumes:
- name: config
persistentVolumeClaim:
claimName: lidatube-config
- name: music
persistentVolumeClaim:
claimName: lidatube-nfs-storage
---
# Source: lidatube/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: lidatube-secret
namespace: lidatube
labels:
app.kubernetes.io/name: lidatube-secret
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: lidarr_api_key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/lidarr2/key
metadataPolicy: None
property: key
---
# Source: lidatube/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-lidatube
namespace: lidatube
labels:
app.kubernetes.io/name: http-route-lidatube
app.kubernetes.io/instance: lidatube
app.kubernetes.io/part-of: lidatube
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- lidatube.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: lidatube
port: 80
weight: 100


@@ -0,0 +1,180 @@
---
# Source: listenarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: listenarr-nfs-storage
namespace: listenarr
labels:
app.kubernetes.io/name: listenarr-nfs-storage
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Audiobooks
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: listenarr/charts/listenarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: listenarr
labels:
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
helm.sh/chart: listenarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: listenarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: listenarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: listenarr-nfs-storage
namespace: listenarr
labels:
app.kubernetes.io/name: listenarr-nfs-storage
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
volumeName: listenarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: listenarr/charts/listenarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: listenarr
labels:
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
app.kubernetes.io/service: listenarr
helm.sh/chart: listenarr-4.4.0
namespace: listenarr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/name: listenarr
---
# Source: listenarr/charts/listenarr/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: listenarr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: listenarr
helm.sh/chart: listenarr-4.4.0
namespace: listenarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: listenarr
app.kubernetes.io/instance: listenarr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: listenarr
app.kubernetes.io/name: listenarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: LISTENARR_PUBLIC_URL
value: https://listenarr.alexlebens.net
image: therobbiedavis/listenarr:canary-0.2.35
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 128Mi
volumeMounts:
- mountPath: /app/config
name: config
- mountPath: /data
name: media
volumes:
- name: config
persistentVolumeClaim:
claimName: listenarr
- name: media
persistentVolumeClaim:
claimName: listenarr-nfs-storage
---
# Source: listenarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-listenarr
namespace: listenarr
labels:
app.kubernetes.io/name: http-route-listenarr
app.kubernetes.io/instance: listenarr
app.kubernetes.io/part-of: listenarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- listenarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: listenarr
port: 80
weight: 100


@@ -0,0 +1,100 @@
---
# Source: omni-tools/charts/omni-tools/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: omni-tools
labels:
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: omni-tools
app.kubernetes.io/service: omni-tools
helm.sh/chart: omni-tools-4.4.0
namespace: omni-tools
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/name: omni-tools
---
# Source: omni-tools/charts/omni-tools/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: omni-tools
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: omni-tools
helm.sh/chart: omni-tools-4.4.0
namespace: omni-tools
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: omni-tools
app.kubernetes.io/instance: omni-tools
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/name: omni-tools
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: iib0011/omni-tools:0.6.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 50m
memory: 512Mi
---
# Source: omni-tools/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-omni-tools
namespace: omni-tools
labels:
app.kubernetes.io/name: http-route-omni-tools
app.kubernetes.io/instance: omni-tools
app.kubernetes.io/part-of: omni-tools
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- omni-tools.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: omni-tools
port: 80
weight: 100


@@ -0,0 +1,988 @@
---
# Source: outline/charts/outline/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: outline
labels:
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: outline
app.kubernetes.io/service: outline
helm.sh/chart: outline-4.4.0
namespace: outline
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: outline
---
# Source: outline/charts/cloudflared-outline/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline-cloudflared-outline
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-outline
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-outline-1.23.0
namespace: outline
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-outline
app.kubernetes.io/instance: outline
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: cloudflared-outline
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: outline-cloudflared-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: outline/charts/outline/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: outline
helm.sh/chart: outline-4.4.0
namespace: outline
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: outline
app.kubernetes.io/instance: outline
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: outline
app.kubernetes.io/name: outline
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: NODE_ENV
value: production
- name: URL
value: https://wiki.alexlebens.dev
- name: PORT
value: "3000"
- name: SECRET_KEY
valueFrom:
secretKeyRef:
key: secret-key
name: outline-key-secret
- name: UTILS_SECRET
valueFrom:
secretKeyRef:
key: utils-key
name: outline-key-secret
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
key: username
name: outline-postgresql-17-cluster-app
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_NAME
valueFrom:
secretKeyRef:
key: dbname
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_HOST
valueFrom:
secretKeyRef:
key: host
name: outline-postgresql-17-cluster-app
- name: POSTGRES_DATABASE_PORT
valueFrom:
secretKeyRef:
key: port
name: outline-postgresql-17-cluster-app
- name: DATABASE_URL
value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)
- name: DATABASE_URL_TEST
value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test
- name: DATABASE_CONNECTION_POOL_MIN
value: "2"
- name: DATABASE_CONNECTION_POOL_MAX
value: "20"
- name: PGSSLMODE
value: disable
- name: REDIS_URL
value: redis://redis-replication-outline-master.outline:6379
- name: FILE_STORAGE
value: s3
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: ceph-bucket-outline
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: ceph-bucket-outline
- name: AWS_REGION
value: us-east-1
- name: AWS_S3_UPLOAD_BUCKET_NAME
valueFrom:
configMapKeyRef:
key: BUCKET_NAME
name: ceph-bucket-outline
- name: AWS_S3_UPLOAD_BUCKET_URL
value: https://objects.alexlebens.dev
- name: AWS_S3_FORCE_PATH_STYLE
value: "true"
- name: AWS_S3_ACL
value: private
- name: FILE_STORAGE_UPLOAD_MAX_SIZE
value: "26214400"
- name: FORCE_HTTPS
value: "false"
- name: ENABLE_UPDATES
value: "false"
- name: WEB_CONCURRENCY
value: "1"
- name: FILE_STORAGE_IMPORT_MAX_SIZE
value: "5.12e+06"
- name: LOG_LEVEL
value: info
- name: DEFAULT_LANGUAGE
value: en_US
- name: RATE_LIMITER_ENABLED
value: "false"
- name: DEVELOPMENT_UNSAFE_INLINE_CSP
value: "false"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
key: client
name: outline-oidc-secret
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: secret
name: outline-oidc-secret
- name: OIDC_AUTH_URI
value: https://auth.alexlebens.dev/application/o/authorize/
- name: OIDC_TOKEN_URI
value: https://auth.alexlebens.dev/application/o/token/
- name: OIDC_USERINFO_URI
value: https://auth.alexlebens.dev/application/o/userinfo/
- name: OIDC_USERNAME_CLAIM
value: email
- name: OIDC_DISPLAY_NAME
value: Authentik
- name: OIDC_SCOPES
value: openid profile email
image: outlinewiki/outline:1.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
---
# Source: outline/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: outline-postgresql-17-cluster
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "outline-postgresql-17-external-backup"
serverName: "outline-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "outline-postgresql-17-garage-local-backup"
serverName: "outline-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-recovery"
serverName: outline-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: outline-postgresql-17-backup-1
externalClusters:
- name: outline-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "outline-postgresql-17-recovery"
serverName: outline-postgresql-17-backup-1
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-key-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-key-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/outline/key
metadataPolicy: None
property: secret-key
- secretKey: utils-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/outline/key
metadataPolicy: None
property: utils-key
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-oidc-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-oidc-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: client
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/outline
metadataPolicy: None
property: client
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/outline
metadataPolicy: None
property: secret
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-cloudflared-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-cloudflared-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/outline
metadataPolicy: None
property: token
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-postgresql-17-cluster-backup-secret
namespace: outline
labels:
app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: outline/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: outline-postgresql-17-cluster-backup-secret-garage
namespace: outline
labels:
app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: outline/templates/object-bucket-claim.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: ceph-bucket-outline
labels:
app.kubernetes.io/name: ceph-bucket-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
generateBucketName: bucket-outline
storageClassName: ceph-bucket
additionalConfig:
bucketPolicy: |
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor",
"Effect": "Allow",
"Action": [
"s3:GetObjectAcl",
"s3:DeleteObject",
"s3:PutObject",
"s3:GetObject",
"s3:PutObjectAcl"
],
"Resource": "arn:aws:s3:::bucket-outline-630c57e0-d475-4d78-926c-c1c082291d73/*"
}
]
}
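# Note: the bucket policy above only grants object-level actions (get/put/delete plus ACL read/write), and its Resource ARN is pinned to the specific bucket instance generated for this claim.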
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-external-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-garage-local-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
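# Note: the two ObjectStores above give the cluster two backup targets: an off-site one on DigitalOcean Spaces (30d retention) and a local one on the in-cluster Garage endpoint (3d retention).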
---
# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "outline-postgresql-17-recovery"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: outline-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: outline/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: outline-postgresql-17-alert-rules
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/outline-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 1
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 2
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: outline
cnpg_cluster: outline-postgresql-17-cluster
---
# Source: outline/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-outline
namespace: outline
labels:
app.kubernetes.io/name: redis-replication-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
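# Note: this is a 3-node Redis replication managed by the OpsTree redis-operator; the enabled redis-exporter is what the redis-replication-outline ServiceMonitor below scrapes via the redis_setup_type: replication selector.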
---
# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "outline-postgresql-17-daily-backup-scheduled-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: outline-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-external-backup"
---
# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "outline-postgresql-17-live-backup-scheduled-backup"
namespace: outline
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: outline-postgresql-17
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: outline-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "outline-postgresql-17-garage-local-backup"
---
# Source: outline/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-outline
namespace: outline
labels:
app.kubernetes.io/name: redis-replication-outline
app.kubernetes.io/instance: outline
app.kubernetes.io/part-of: outline
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s


@@ -0,0 +1,215 @@
---
# Source: overseerr/charts/app-template/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: overseerr-main
labels:
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
helm.sh/chart: app-template-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: overseerr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: overseerr/charts/app-template/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: overseerr
labels:
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
app.kubernetes.io/service: overseerr
helm.sh/chart: app-template-4.4.0
namespace: overseerr
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5055
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/name: overseerr
---
# Source: overseerr/charts/app-template/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: overseerr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: overseerr
helm.sh/chart: app-template-4.4.0
namespace: overseerr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: overseerr
app.kubernetes.io/instance: overseerr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: overseerr
app.kubernetes.io/name: overseerr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/sct/overseerr:1.34.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
volumeMounts:
- mountPath: /app/config
name: main
volumes:
- name: main
persistentVolumeClaim:
claimName: overseerr-main
---
# Source: overseerr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: overseerr-main-backup-secret
namespace: overseerr
labels:
app.kubernetes.io/name: overseerr-main-backup-secret
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/overseerr/overseerr-main"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
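# Note: target.template with mergePolicy: Merge composes RESTIC_REPOSITORY from the fetched BUCKET_ENDPOINT and merges it with the remaining keys (RESTIC_PASSWORD, AWS_* credentials) expected by the VolSync restic mover.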
---
# Source: overseerr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-overseerr
namespace: overseerr
labels:
app.kubernetes.io/name: http-route-overseerr
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- overseerr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: overseerr
port: 80
weight: 100
---
# Source: overseerr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: overseerr-main-backup-source
namespace: overseerr
labels:
app.kubernetes.io/name: overseerr-main-backup-source
app.kubernetes.io/instance: overseerr
app.kubernetes.io/part-of: overseerr
spec:
sourcePVC: overseerr-main
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: overseerr-main-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
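# Note: this ReplicationSource has VolSync snapshot the overseerr-main PVC daily at 04:00 and back it up with restic; the repository field names the overseerr-main-backup-secret rendered above rather than a URL.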


@@ -0,0 +1,773 @@
---
# Source: photoview/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: photoview-nfs-storage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-nfs-storage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Pictures
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: photoview/charts/photoview/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: photoview-cache
labels:
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: photoview/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: photoview-nfs-storage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-nfs-storage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
volumeName: photoview-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: photoview/charts/photoview/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: photoview
labels:
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
app.kubernetes.io/service: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/name: photoview
---
# Source: photoview/charts/photoview/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: photoview
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: photoview
helm.sh/chart: photoview-4.4.0
namespace: photoview
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: photoview
app.kubernetes.io/instance: photoview
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: photoview
app.kubernetes.io/name: photoview
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
/bin/chown -R 999:999 /app/cache
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-chmod-data
resources:
requests:
cpu: 100m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /app/cache
name: cache
containers:
- env:
- name: PHOTOVIEW_DATABASE_DRIVER
value: postgres
- name: PHOTOVIEW_POSTGRES_URL
valueFrom:
secretKeyRef:
key: uri
name: photoview-postgresql-17-cluster-app
- name: PHOTOVIEW_MEDIA_CACHE
value: /app/cache
image: photoview/photoview:2.4.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 512Mi
volumeMounts:
- mountPath: /app/cache
name: cache
- mountPath: /photos
name: media
readOnly: true
volumes:
- name: cache
persistentVolumeClaim:
claimName: photoview-cache
- name: media
persistentVolumeClaim:
claimName: photoview-nfs-storage
---
# Source: photoview/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: photoview-postgresql-17-cluster
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "photoview-postgresql-17-external-backup"
serverName: "photoview-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "photoview-postgresql-17-garage-local-backup"
serverName: "photoview-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-recovery"
serverName: photoview-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: photoview-postgresql-17-backup-1
externalClusters:
- name: photoview-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "photoview-postgresql-17-recovery"
serverName: photoview-postgresql-17-backup-1
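# Note: bootstrap.recovery restores this cluster from the photoview-postgresql-17-backup-1 server in the *-recovery ObjectStore, while the plugin entry with isWALArchiver: true archives ongoing WAL to the Garage-backed store.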
---
# Source: photoview/templates/external-secrets.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: photoview-postgresql-17-cluster-backup-secret
namespace: photoview
labels:
app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: photoview/templates/external-secrets.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: photoview-postgresql-17-cluster-backup-secret-garage
namespace: photoview
labels:
app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: photoview/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-photoview
namespace: photoview
labels:
app.kubernetes.io/name: http-route-photoview
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- photoview.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: photoview
port: 80
weight: 100
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-external-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-garage-local-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "photoview-postgresql-17-recovery"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: photoview-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: photoview/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: photoview-postgresql-17-alert-rules
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/photoview-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 1
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 2
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: photoview
cnpg_cluster: photoview-postgresql-17-cluster
---
# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "photoview-postgresql-17-daily-backup-scheduled-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: photoview-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-external-backup"
---
# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "photoview-postgresql-17-live-backup-scheduled-backup"
namespace: photoview
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: photoview-postgresql-17
app.kubernetes.io/instance: photoview
app.kubernetes.io/part-of: photoview
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: photoview-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "photoview-postgresql-17-garage-local-backup"


@@ -0,0 +1,190 @@
---
# Source: plex/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: plex-nfs-storage
namespace: plex
labels:
app.kubernetes.io/name: plex-nfs-storage
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: plex/charts/plex/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: plex-config
labels:
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "100Gi"
storageClassName: "ceph-block"
---
# Source: plex/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-nfs-storage
namespace: plex
labels:
app.kubernetes.io/name: plex-nfs-storage
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
volumeName: plex-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: plex/charts/plex/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: plex
labels:
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
app.kubernetes.io/service: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
type: LoadBalancer
ports:
- port: 32400
targetPort: 32400
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/name: plex
---
# Source: plex/charts/plex/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: plex
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: plex
helm.sh/chart: plex-4.4.0
namespace: plex
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: plex
app.kubernetes.io/instance: plex
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: plex
app.kubernetes.io/name: plex
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: VERSION
value: docker
- name: PLEX_CLAIM
value: claim-XmGK2o9x54PbCzQaqj-J
image: ghcr.io/linuxserver/plex:1.42.2@sha256:ab81c7313fb5dc4d1f9562e5bbd5e5877a8a3c5ca6b9f9fff3437b5096a2b123
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 10m
gpu.intel.com/i915: 1
memory: 512Mi
volumeMounts:
- mountPath: /config
name: config
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /transcode
name: transcode
volumes:
- name: config
persistentVolumeClaim:
claimName: plex-config
- name: media
persistentVolumeClaim:
claimName: plex-nfs-storage
- emptyDir: {}
name: transcode
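# Note: the pod requests an Intel GPU (gpu.intel.com/i915), typically used for hardware-accelerated transcoding, writes transcode scratch data to an emptyDir, and mounts the NFS media share read-only.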
---
# Source: plex/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-plex
namespace: plex
labels:
app.kubernetes.io/name: http-route-plex
app.kubernetes.io/instance: plex
app.kubernetes.io/part-of: plex
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- plex.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: plex
port: 32400
weight: 100

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,435 @@
---
# Source: searxng/charts/searxng/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-browser-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: searxng/charts/searxng/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: searxng-api-data
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-api
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-api
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: mail
selector:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-browser
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-browser
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: mail
selector:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-browser
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: browser
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: browser
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: https://searxng.alexlebens.net/
- name: SEARXNG_QUERY_URL
value: https://searxng.alexlebens.net/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng.alexlebens.net
- name: SEARXNG_REDIS_URL
value: redis://redis-replication-searxng-master.searxng:6379/0
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
image: searxng/searxng:latest@sha256:faa7118f9167c2c1e09a3fbb9bd87eee0905d76456d297e62e815646afc97037
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: browser-data
volumes:
- name: browser-data
persistentVolumeClaim:
claimName: searxng-browser-data
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng-api
labels:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: api
app.kubernetes.io/name: searxng
app.kubernetes.io/instance: searxng
template:
metadata:
labels:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SEARXNG_BASE_URL
value: http://searxng-api.searxng:8080
- name: SEARXNG_QUERY_URL
value: http://searxng-api.searxng:8080/search?q=<query>
- name: SEARXNG_HOSTNAME
value: searxng-api.searxng
- name: UWSGI_WORKERS
value: "4"
- name: UWSGI_THREADS
value: "4"
- name: ENABLE_RAG_WEB_SEARCH
value: "true"
- name: RAG_WEB_SEARCH_ENGINE
value: searxng
- name: RAG_WEB_SEARCH_RESULT_COUNT
value: "3"
- name: RAG_WEB_SEARCH_CONCURRENT_REQUESTS
value: "10"
image: searxng/searxng:latest@sha256:faa7118f9167c2c1e09a3fbb9bd87eee0905d76456d297e62e815646afc97037
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /etc/searxng
name: api-data
- mountPath: /etc/searxng/settings.yml
mountPropagation: None
name: config
readOnly: true
subPath: settings.yml
- mountPath: /etc/searxng/limiter.toml
mountPropagation: None
name: config
readOnly: true
subPath: limiter.toml
volumes:
- name: api-data
persistentVolumeClaim:
claimName: searxng-api-data
- name: config
secret:
secretName: searxng-api-config-secret
---
# Source: searxng/templates/external-secret.yaml
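# Pulls settings.yml and limiter.toml from Vault (ClusterSecretStore "vault")
# into searxng-api-config-secret, which the api deployment above mounts via
# subPath. Sync status can be checked with, for example:
#   kubectl -n searxng get externalsecret searxng-api-config-secret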
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: searxng-api-config-secret
namespace: searxng
labels:
app.kubernetes.io/name: searxng-api-config-secret
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: settings.yml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/searxng/api/config
metadataPolicy: None
property: settings.yml
- secretKey: limiter.toml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/searxng/api/config
metadataPolicy: None
property: limiter.toml
---
# Source: searxng/templates/external-secret.yaml
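# Backup credentials for VolSync. The target template (mergePolicy: Merge)
# composes RESTIC_REPOSITORY from the BUCKET_ENDPOINT key pulled below, so the
# rendered secret points restic at <bucket>/searxng/searxng-browser-data while
# the remaining keys (password, region, access keys) are copied through as-is.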
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: searxng-browser-data-backup-secret
namespace: searxng
labels:
app.kubernetes.io/name: searxng-browser-data-backup-secret
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/searxng/searxng-browser-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: searxng/templates/http-route.yaml
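# Exposes the browser deployment at searxng.alexlebens.net through the shared
# Traefik Gateway; all paths are forwarded to the searxng-browser Service on
# port 80.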
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-searxng
namespace: searxng
labels:
app.kubernetes.io/name: http-route-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- searxng.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: searxng-browser
port: 80
weight: 100
---
# Source: searxng/templates/redis-replication.yaml
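# Operator-managed 3-node Redis replication (redis-operator CRD). The master
# Service it creates, redis-replication-searxng-master.searxng:6379, matches
# the endpoint referenced by SEARXNG_REDIS_URL in the browser deployment above.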
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-searxng
namespace: searxng
labels:
app.kubernetes.io/name: redis-replication-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: searxng/templates/replication-source.yaml
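# VolSync backup of the searxng-browser-data PVC: a restic snapshot is taken
# daily at 04:00 using the repository secret defined above, pruned every 7 days,
# with the retention policy listed under retain.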
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: searxng-browser-data-backup-source
namespace: searxng
labels:
app.kubernetes.io/name: searxng-browser-data-backup-source
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
spec:
sourcePVC: searxng-browser-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: searxng-browser-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: searxng/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-searxng
namespace: searxng
labels:
app.kubernetes.io/name: redis-replication-searxng
app.kubernetes.io/instance: searxng
app.kubernetes.io/part-of: searxng
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -0,0 +1,153 @@
---
# Source: site-documentation/charts/site-documentation/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: site-documentation
labels:
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-documentation
app.kubernetes.io/service: site-documentation
helm.sh/chart: site-documentation-4.4.0
namespace: site-documentation
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4321
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: site-documentation
---
# Source: site-documentation/charts/cloudflared-site/templates/common.yaml
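# cloudflared runs as its own single-replica Deployment and opens an outbound
# Cloudflare Tunnel for the site; the tunnel token is injected from the
# ExternalSecret rendered at the end of this file, so no inbound ports or
# Gateway routes are needed for public access.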
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-documentation-cloudflared-site
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-site-1.23.0
namespace: site-documentation
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/instance: site-documentation
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: cloudflared-site
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: site-documentation-cloudflared-api-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-documentation/charts/site-documentation/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-documentation
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-documentation
helm.sh/chart: site-documentation-4.4.0
namespace: site-documentation
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: site-documentation
app.kubernetes.io/instance: site-documentation
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/name: site-documentation
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: harbor.alexlebens.net/images/site-documentation:0.0.3
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-documentation/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: site-documentation-cloudflared-api-secret
namespace: site-documentation
labels:
app.kubernetes.io/name: site-documentation-cloudflared-api-secret
app.kubernetes.io/instance: site-documentation
app.kubernetes.io/part-of: site-documentation
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/site-documentation
metadataPolicy: None
property: token

View File

@@ -0,0 +1,153 @@
---
# Source: site-profile/charts/site-profile/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: site-profile
labels:
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-profile
app.kubernetes.io/service: site-profile
helm.sh/chart: site-profile-4.4.0
namespace: site-profile
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4321
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: site-profile
---
# Source: site-profile/charts/cloudflared-site/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-profile-cloudflared-site
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/version: 2025.10.0
helm.sh/chart: cloudflared-site-1.23.0
namespace: site-profile
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: cloudflared-site
app.kubernetes.io/instance: site-profile
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: cloudflared-site
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- --protocol
- http2
- --no-autoupdate
- run
- --token
- $(CF_MANAGED_TUNNEL_TOKEN)
env:
- name: CF_MANAGED_TUNNEL_TOKEN
valueFrom:
secretKeyRef:
key: cf-tunnel-token
name: site-profile-cloudflared-api-secret
image: cloudflare/cloudflared:2025.11.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-profile/charts/site-profile/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: site-profile
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: site-profile
helm.sh/chart: site-profile-4.4.0
namespace: site-profile
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: site-profile
app.kubernetes.io/instance: site-profile
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: site-profile
app.kubernetes.io/name: site-profile
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: harbor.alexlebens.net/images/site-profile:2.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: site-profile/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: site-profile-cloudflared-api-secret
namespace: site-profile
labels:
app.kubernetes.io/name: site-profile-cloudflared-api-secret
app.kubernetes.io/instance: site-profile
app.kubernetes.io/part-of: site-profile
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/site-profile
metadataPolicy: None
property: token

View File

@@ -0,0 +1,396 @@
---
# Source: slskd/templates/namespace.yaml
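# The namespace is labelled for the "privileged" Pod Security level because the
# slskd pod below runs a privileged sysctl init container and a gluetun sidecar
# with NET_ADMIN/SYS_MODULE for its WireGuard tunnel.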
apiVersion: v1
kind: Namespace
metadata:
name: slskd
labels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: slskd/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: slskd-nfs-storage
namespace: slskd
labels:
app.kubernetes.io/name: slskd-nfs-storage
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: slskd/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: slskd-nfs-storage
namespace: slskd
labels:
app.kubernetes.io/name: slskd-nfs-storage
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
volumeName: slskd-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: slskd/charts/slskd/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: slskd
labels:
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
app.kubernetes.io/service: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
type: ClusterIP
ports:
- port: 5030
targetPort: 5030
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
---
# Source: slskd/charts/slskd/templates/common.yaml
---
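# slskd runs behind a gluetun sidecar: the init container adjusts sysctls, then
# gluetun brings up a ProtonVPN WireGuard tunnel (with port forwarding enabled)
# inside the pod's network namespace, so the slskd container's peer-to-peer
# traffic egresses through the VPN while the web UI on 5030 stays reachable
# in-cluster.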
apiVersion: apps/v1
kind: Deployment
metadata:
name: slskd-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- args:
- -ec
- |
sysctl -w net.ipv4.ip_forward=1;
sysctl -w net.ipv6.conf.all.disable_ipv6=1
command:
- /bin/sh
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-sysctl
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
privileged: true
containers:
- env:
- name: VPN_SERVICE_PROVIDER
value: protonvpn
- name: VPN_TYPE
value: wireguard
- name: WIREGUARD_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: private-key
name: slskd-wireguard-conf
- name: VPN_PORT_FORWARDING
value: "on"
- name: PORT_FORWARD_ONLY
value: "on"
- name: FIREWALL_OUTBOUND_SUBNETS
value: 192.168.1.0/24,10.244.0.0/16
- name: FIREWALL_INPUT_PORTS
value: 5030,50300
- name: DOT
value: "off"
image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
imagePullPolicy: IfNotPresent
name: gluetun
resources:
limits:
devic.es/tun: "1"
requests:
cpu: 10m
devic.es/tun: "1"
memory: 128Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
privileged: true
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: SLSKD_UMASK
value: "0"
image: slskd/slskd:0.24.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 512Mi
volumeMounts:
- mountPath: /mnt/store
name: data
- mountPath: /app/slskd.yml
mountPropagation: None
name: slskd-config
readOnly: true
subPath: slskd.yml
volumes:
- name: data
persistentVolumeClaim:
claimName: slskd-nfs-storage
- name: slskd-config
secret:
secretName: slskd-config-secret
---
# Source: slskd/charts/slskd/templates/common.yaml
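# soularr is a companion container that appears to bridge Lidarr and slskd: it
# polls on SCRIPT_INTERVAL (300s) using the config.ini mounted from Vault and
# shares the same NFS-backed /mnt/store PVC as the slskd pod.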
apiVersion: apps/v1
kind: Deployment
metadata:
name: slskd-soularr
labels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/instance: slskd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: slskd
helm.sh/chart: slskd-4.4.0
namespace: slskd
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
template:
metadata:
labels:
app.kubernetes.io/controller: soularr
app.kubernetes.io/instance: slskd
app.kubernetes.io/name: slskd
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: SCRIPT_INTERVAL
value: "300"
image: mrusse08/soularr:latest@sha256:71a0b9e5a522d76bb0ffdb6d720d681fde22417b3a5acc9ecae61c89d05d8afc
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /mnt/store
name: data
- mountPath: /data/config.ini
mountPropagation: None
name: soularr-config
readOnly: true
subPath: config.ini
volumes:
- name: data
persistentVolumeClaim:
claimName: slskd-nfs-storage
- name: soularr-config
secret:
secretName: soularr-config-secret
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: slskd-config-secret
namespace: slskd
labels:
app.kubernetes.io/name: slskd-config-secret
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: slskd.yml
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/slskd/config
metadataPolicy: None
property: slskd.yml
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: soularr-config-secret
namespace: slskd
labels:
app.kubernetes.io/name: soularr-config-secret
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: config.ini
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/slskd/soularr
metadataPolicy: None
property: config.ini
---
# Source: slskd/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: slskd-wireguard-conf
namespace: slskd
labels:
app.kubernetes.io/name: slskd-wireguard-conf
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /protonvpn/conf/cl01tl
metadataPolicy: None
property: private-key
---
# Source: slskd/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-slskd
namespace: slskd
labels:
app.kubernetes.io/name: http-route-slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- slskd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: slskd
port: 5030
weight: 100
---
# Source: slskd/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: slskd
namespace: slskd
labels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
app.kubernetes.io/part-of: slskd
spec:
selector:
matchLabels:
app.kubernetes.io/name: slskd
app.kubernetes.io/instance: slskd
endpoints:
- port: http
interval: 3m
scrapeTimeout: 1m
path: /metrics

View File

@@ -0,0 +1,658 @@
---
# Source: tdarr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: tdarr-nfs-storage
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-nfs-storage
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: tdarr/charts/tdarr/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tdarr-config
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tdarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "ceph-block"
---
# Source: tdarr/charts/tdarr/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tdarr-server
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: tdarr
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "ceph-block"
---
# Source: tdarr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tdarr-nfs-storage
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-nfs-storage
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
volumeName: tdarr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: tdarr/charts/tdarr-exporter/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-tdarr-exporter
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9090
targetPort: 9090
protocol: TCP
name: metrics
selector:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-api
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-api
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8266
targetPort: 8266
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-web
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-web
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8265
targetPort: 8265
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
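# Transcode workers run as a DaemonSet restricted to nodes labelled by Intel GPU
# feature discovery (intel.feature.node.kubernetes.io/gpu). Each pod claims one
# gpu.intel.com/i915 device for hardware transcoding, registers with the server
# via serverIP=tdarr-api on port 8266, and uses an emptyDir as its local
# transcode cache.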
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: tdarr-node
labels:
app.kubernetes.io/controller: node
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: node
app.kubernetes.io/name: tdarr
app.kubernetes.io/instance: tdarr
template:
metadata:
annotations:
labels:
app.kubernetes.io/controller: node
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
nodeSelector:
intel.feature.node.kubernetes.io/gpu: "true"
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1001"
- name: PGID
value: "1001"
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: inContainer
value: "true"
- name: nodeName
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: serverIP
value: tdarr-api
- name: serverPort
value: "8266"
image: ghcr.io/haveagitgat/tdarr_node:2.58.02
imagePullPolicy: IfNotPresent
name: main
resources:
limits:
gpu.intel.com/i915: 1
requests:
cpu: 10m
gpu.intel.com/i915: 1
memory: 512Mi
volumeMounts:
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /tcache
name: node-cache
volumes:
- name: media
persistentVolumeClaim:
claimName: tdarr-nfs-storage
- emptyDir: {}
name: node-cache
---
# Source: tdarr/charts/tdarr-exporter/templates/deployment.yaml
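# Prometheus exporter for Tdarr; it reads the web UI at tdarr-web.tdarr:8265 and
# serves /metrics on 9090 for the ServiceMonitor below. Note the image is pinned
# to 1.4.2 while the chart's appVersion label says 1.4.3, presumably a deliberate
# pin rather than an error.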
apiVersion: apps/v1
kind: Deployment
metadata:
name: tdarr-tdarr-exporter
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
template:
metadata:
annotations:
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: default
securityContext:
{}
containers:
- name: tdarr-exporter
securityContext:
{}
image: "docker.io/homeylab/tdarr-exporter:1.4.2"
imagePullPolicy: IfNotPresent
ports:
- name: metrics
containerPort: 9090
protocol: TCP
env:
- name: TDARR_URL
value: "http://tdarr-web.tdarr:8265"
- name: VERIFY_SSL
value: "false"
- name: LOG_LEVEL
value: "info"
- name: PROMETHEUS_PORT
value: "9090"
- name: PROMETHEUS_PATH
value: "/metrics"
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
readinessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
startupProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: metrics
initialDelaySeconds: 2
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
resources:
requests:
cpu: 10m
memory: 256Mi
---
# Source: tdarr/charts/tdarr/templates/common.yaml
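# The Tdarr server hosts the web UI (8265) and node API (8266) exposed by the
# Services above. internalNode=false leaves all transcoding to the DaemonSet
# workers; configs, server state, and the NFS media share are mounted from the
# PVCs defined earlier, with an emptyDir for the server-side cache.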
apiVersion: apps/v1
kind: Deployment
metadata:
name: tdarr-server
labels:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: server
app.kubernetes.io/name: tdarr
app.kubernetes.io/instance: tdarr
template:
metadata:
labels:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: PUID
value: "1001"
- name: PGID
value: "1001"
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: internalNode
value: "false"
- name: inContainer
value: "true"
- name: nodeName
value: tdarr-server
- name: serverIP
value: 0.0.0.0
- name: serverPort
value: "8266"
- name: webUIPort
value: "8265"
image: ghcr.io/haveagitgat/tdarr:2.58.02
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 200m
memory: 1Gi
volumeMounts:
- mountPath: /app/configs
name: config
- mountPath: /mnt/store
name: media
readOnly: true
- mountPath: /app/server
name: server
- mountPath: /tcache
name: server-cache
volumes:
- name: config
persistentVolumeClaim:
claimName: tdarr-config
- name: media
persistentVolumeClaim:
claimName: tdarr-nfs-storage
- name: server
persistentVolumeClaim:
claimName: tdarr-server
- emptyDir: {}
name: server-cache
---
# Source: tdarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tdarr-config-backup-secret
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-config-backup-secret
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tdarr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: tdarr-server-backup-secret
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-server-backup-secret
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-server"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: tdarr/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-tdarr
namespace: tdarr
labels:
app.kubernetes.io/name: http-route-tdarr
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- tdarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: tdarr-web
port: 8265
weight: 100
---
# Source: tdarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tdarr-config-backup-source
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-config-backup-source
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
sourcePVC: tdarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tdarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: tdarr-server-backup-source
namespace: tdarr
labels:
app.kubernetes.io/name: tdarr-server-backup-source
app.kubernetes.io/instance: tdarr
app.kubernetes.io/part-of: tdarr
spec:
sourcePVC: tdarr-server
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: tdarr-server-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/charts/tdarr-exporter/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
name: tdarr-tdarr-exporter
spec:
endpoints:
- interval: 1m
path: /metrics
port: metrics
scrapeTimeout: 15s
namespaceSelector:
matchNames:
- tdarr
selector:
matchLabels:
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr-exporter/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "tdarr-tdarr-exporter-test-connection"
labels:
helm.sh/chart: tdarr-exporter-1.1.7
app.kubernetes.io/name: tdarr-exporter
app.kubernetes.io/instance: tdarr
app.kubernetes.io/version: "1.4.3"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: "docker.io/busybox:1.36.1"
command: ['wget']
args: ['tdarr-tdarr-exporter:9090/healthz']
restartPolicy: Never