From f39fda9f14d4432b2fb77edc9adffe9770697657 Mon Sep 17 00:00:00 2001 From: gitea-bot Date: Tue, 2 Dec 2025 01:40:19 +0000 Subject: [PATCH] Automated Manifest Update (#2172) This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow. Reviewed-on: https://gitea.alexlebens.dev/alexlebens/infrastructure/pulls/2172 Co-authored-by: gitea-bot Co-committed-by: gitea-bot --- .../cl01tl/manifests/ephemera/ephemera.yaml | 360 +++++ .../cl01tl/manifests/homepage/homepage.yaml | 1132 ++++++++++++++++ .../cl01tl/manifests/jellystat/jellystat.yaml | 861 ++++++++++++ .../cl01tl/manifests/lidatube/lidatube.yaml | 221 +++ .../cl01tl/manifests/listenarr/listenarr.yaml | 180 +++ .../manifests/omni-tools/omni-tools.yaml | 100 ++ .../cl01tl/manifests/outline/outline.yaml | 988 ++++++++++++++ .../cl01tl/manifests/overseerr/overseerr.yaml | 215 +++ .../cl01tl/manifests/photoview/photoview.yaml | 773 +++++++++++ clusters/cl01tl/manifests/plex/plex.yaml | 190 +++ clusters/cl01tl/manifests/postiz/postiz.yaml | 1180 +++++++++++++++++ .../cl01tl/manifests/roundcube/roundcube.yaml | 1005 ++++++++++++++ .../cl01tl/manifests/searxng/searxng.yaml | 435 ++++++ .../site-documentation.yaml | 153 +++ .../manifests/site-profile/site-profile.yaml | 153 +++ clusters/cl01tl/manifests/slskd/slskd.yaml | 396 ++++++ clusters/cl01tl/manifests/tdarr/tdarr.yaml | 658 +++++++++ 17 files changed, 9000 insertions(+) create mode 100644 clusters/cl01tl/manifests/ephemera/ephemera.yaml create mode 100644 clusters/cl01tl/manifests/homepage/homepage.yaml create mode 100644 clusters/cl01tl/manifests/jellystat/jellystat.yaml create mode 100644 clusters/cl01tl/manifests/lidatube/lidatube.yaml create mode 100644 clusters/cl01tl/manifests/listenarr/listenarr.yaml create mode 100644 clusters/cl01tl/manifests/omni-tools/omni-tools.yaml create mode 100644 clusters/cl01tl/manifests/outline/outline.yaml create mode 100644 clusters/cl01tl/manifests/overseerr/overseerr.yaml create mode 100644 clusters/cl01tl/manifests/photoview/photoview.yaml create mode 100644 clusters/cl01tl/manifests/plex/plex.yaml create mode 100644 clusters/cl01tl/manifests/postiz/postiz.yaml create mode 100644 clusters/cl01tl/manifests/roundcube/roundcube.yaml create mode 100644 clusters/cl01tl/manifests/searxng/searxng.yaml create mode 100644 clusters/cl01tl/manifests/site-documentation/site-documentation.yaml create mode 100644 clusters/cl01tl/manifests/site-profile/site-profile.yaml create mode 100644 clusters/cl01tl/manifests/slskd/slskd.yaml create mode 100644 clusters/cl01tl/manifests/tdarr/tdarr.yaml diff --git a/clusters/cl01tl/manifests/ephemera/ephemera.yaml b/clusters/cl01tl/manifests/ephemera/ephemera.yaml new file mode 100644 index 000000000..664b082ff --- /dev/null +++ b/clusters/cl01tl/manifests/ephemera/ephemera.yaml @@ -0,0 +1,360 @@ +--- +# Source: ephemera/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ephemera-import-nfs-storage + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-import-nfs-storage + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Books Import + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: ephemera/charts/ephemera/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: 
+ name: ephemera + labels: + app.kubernetes.io/instance: ephemera + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ephemera + helm.sh/chart: ephemera-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: ephemera +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: ephemera/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ephemera-import-nfs-storage + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-import-nfs-storage + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + volumeName: ephemera-import-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: ephemera/charts/ephemera/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: ephemera + labels: + app.kubernetes.io/instance: ephemera + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ephemera + app.kubernetes.io/service: ephemera + helm.sh/chart: ephemera-4.4.0 + namespace: ephemera +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8286 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: ephemera + app.kubernetes.io/name: ephemera +--- +# Source: ephemera/charts/ephemera/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ephemera + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: ephemera + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ephemera + helm.sh/chart: ephemera-4.4.0 + namespace: ephemera +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: ephemera + app.kubernetes.io/instance: ephemera + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: ephemera + app.kubernetes.io/name: ephemera + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: APPRISE_STORAGE_MODE + value: memory + - name: APPRISE_STATEFUL_MODE + value: disabled + - name: APPRISE_WORKER_COUNT + value: "1" + - name: APPRISE_STATELESS_URLS + valueFrom: + secretKeyRef: + key: ntfy-url + name: ephemera-apprise-config + image: caronc/apprise:1.2.6 + imagePullPolicy: IfNotPresent + name: apprise-api + resources: + requests: + cpu: 10m + memory: 128Mi + - env: + - name: LOG_LEVEL + value: info + - name: LOG_HTML + value: "false" + - name: CAPTCHA_SOLVER + value: none + - name: TZ + value: America/Chicago + image: ghcr.io/flaresolverr/flaresolverr:v3.4.5 + imagePullPolicy: IfNotPresent + name: flaresolverr + resources: + requests: + cpu: 10m + memory: 128Mi + - env: + - name: AA_BASE_URL + value: https://annas-archive.org + - name: FLARESOLVERR_URL + value: http://127.0.0.1:8191 + - name: LG_BASE_URL + value: https://gen.com + - name: PUID + value: "0" + - name: PGID + value: "0" + image: ghcr.io/orwellianepilogue/ephemera:1.3.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 128Mi + volumeMounts: + - mountPath: /app/downloads + name: cache + - mountPath: /app/data + name: config + - mountPath: /app/ingest + name: ingest + volumes: + - 
emptyDir: {} + name: cache + - name: config + persistentVolumeClaim: + claimName: ephemera + - name: ingest + persistentVolumeClaim: + claimName: ephemera-import-nfs-storage +--- +# Source: ephemera/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ephemera-key-secret + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-key-secret + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/ephemera/config + metadataPolicy: None + property: key +--- +# Source: ephemera/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ephemera-apprise-config + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-apprise-config + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ntfy-url + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/ephemera/config + metadataPolicy: None + property: ntfy-url +--- +# Source: ephemera/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ephemera-config-backup-secret + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-config-backup-secret + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/ephemera/ephemera-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: ephemera/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-ephemera + namespace: ephemera + labels: + app.kubernetes.io/name: http-route-ephemera + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - ephemera.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: ephemera + port: 80 + weight: 100 +--- +# Source: ephemera/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: 
ephemera-config-backup-source + namespace: ephemera + labels: + app.kubernetes.io/name: ephemera-config-backup-source + app.kubernetes.io/instance: ephemera + app.kubernetes.io/part-of: ephemera +spec: + sourcePVC: ephemera-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: ephemera-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot + cacheCapacity: 10Gi diff --git a/clusters/cl01tl/manifests/homepage/homepage.yaml b/clusters/cl01tl/manifests/homepage/homepage.yaml new file mode 100644 index 000000000..97261bdad --- /dev/null +++ b/clusters/cl01tl/manifests/homepage/homepage.yaml @@ -0,0 +1,1132 @@ +--- +# Source: homepage/charts/homepage/templates/common.yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: homepage + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage +secrets: + - name: homepage-homepage-sa-token +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: Secret +type: kubernetes.io/service-account-token +metadata: + name: homepage-homepage-sa-token + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + annotations: + kubernetes.io/service-account.name: homepage + namespace: homepage +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: homepage + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage +data: + bookmarks.yaml: | + - External Services: + - Github: + - abbr: GH + href: https://github.com/alexlebens + - Digital Ocean: + - abbr: DO + href: https://www.digitalocean.com/ + - AWS: + - abbr: AW + href: https://aws.amazon.com/console/ + - Cloudflare: + - abbr: CF + href: https://dash.cloudflare.com/b76e303258b84076ee01fd0f515c0768 + - Tailscale: + - abbr: TS + href: https://login.tailscale.com/admin/machines + - ProtonVPN: + - abbr: PV + href: https://account.protonvpn.com/ + - Unifi: + - abbr: UF + href: https://unifi.ui.com/ + - Pushover: + - abbr: PO + href: https://pushover.net + - ReCaptcha: + - abbr: RC + href: https://www.google.com/recaptcha/admin/site/698983587 + - Trackers: + - Torrentleech: + - abbr: TL + href: https://www.torrentleech.org + - Avistaz: + - abbr: AV + href: https://avistaz.to + - Cinemaz: + - abbr: CM + href: https://cinemaz.to + - Cathode Ray Tube: + - abbr: CRT + href: https://www.cathode-ray.tube + - Alpha Ratio: + - abbr: AL + href: https://alpharatio.cc/ + - MV Group: + - abbr: MV + href: https://forums.mvgroup.org + docker.yaml: "" + kubernetes.yaml: | + mode: cluster + services.yaml: | + - Media: + - Plex: + icon: sh-plex.webp + description: Media server + href: https://plex.alexlebens.net + siteMonitor: http://plex.plex:32400 + statusStyle: dot + - Jellyfin: + icon: sh-jellyfin.webp + description: Media server + href: https://jellyfin.alexlebens.net + siteMonitor: http://jellyfin.jellyfin:80 + statusStyle: dot + - Media Requests: + icon: sh-overseerr.webp + description: Overseer + href: https://overseerr.alexlebens.net + siteMonitor: http://overseerr.overseerr:80 + statusStyle: dot + - Media Tracking: + icon: 
sh-yamtrack.webp + description: Yamtrack + href: https://yamtrack.alexlebens.net + siteMonitor: http://yamtrack.yamtrack:80 + statusStyle: dot + - Youtube Archive: + icon: sh-tube-archivist-light.webp + description: TubeAchivist + href: https://tubearchivist.alexlebens.net/login + siteMonitor: http://tubearchivist.tubearchivist:80 + statusStyle: dot + - Photos: + icon: sh-immich.webp + description: Immich + href: https://immich.alexlebens.net + siteMonitor: http://immich-main.immich:2283 + statusStyle: dot + - Pictures: + icon: sh-photoview.webp + description: Photoview + href: https://photoview.alexlebens.net + siteMonitor: http://photoview.photoview:80 + statusStyle: dot + - Podcasts and Audiobooks: + icon: sh-audiobookshelf.webp + description: Audiobookshelf + href: https://audiobookshelf.alexlebens.net + siteMonitor: http://audiobookshelf.audiobookshelf:80 + statusStyle: dot + - Books: + icon: sh-booklore.webp + description: Booklore + href: https://booklore.alexlebens.net + siteMonitor: http://booklore.booklore:80 + statusStyle: dot + - Public: + - Site: + icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png + description: Profile Website + href: https://www.alexlebens.dev + siteMonitor: https://www.alexlebens.dev + statusStyle: dot + - Content Management: + icon: directus.png + description: Directus + href: https://directus.alexlebens.dev + siteMonitor: https://directus.alexlebens.dev + statusStyle: dot + - Social Media Management: + icon: sh-postiz.webp + description: Postiz + href: https://postiz.alexlebens.dev + siteMonitor: https://postiz.alexlebens.dev + statusStyle: dot + - Chat: + icon: sh-element.webp + description: Matrix + href: https://chat.alexlebens.dev + siteMonitor: https://chat.alexlebens.dev + statusStyle: dot + - Wiki: + icon: sh-outline.webp + description: Outline + href: https://wiki.alexlebens.dev + siteMonitor: https://wiki.alexlebens.dev + statusStyle: dot + - Passwords: + icon: sh-vaultwarden-light.webp + description: Vaultwarden + href: https://passwords.alexlebens.dev + siteMonitor: https://passwords.alexlebens.dev + statusStyle: dot + - Bookmarks: + icon: sh-karakeep-light.webp + description: Karakeep + href: https://karakeep.alexlebens.dev + siteMonitor: https://karakeep.alexlebens.dev + statusStyle: dot + - RSS: + icon: sh-freshrss.webp + description: FreshRSS + href: https://rss.alexlebens.dev + siteMonitor: https://rss.alexlebens.dev + statusStyle: dot + - Internal: + - Home Automation: + icon: sh-home-assistant.webp + description: Home Assistant + href: https://home-assistant.alexlebens.net + siteMonitor: http://home-assistant-main.home-assistant:80 + statusStyle: dot + - Budgeting: + icon: sh-actual-budget.webp + description: Actual + href: https://actual.alexlebens.net + siteMonitor: http://actual.actual:80 + statusStyle: dot + - AI: + icon: sh-ollama.webp + description: Ollama + href: https://ollama.alexlebens.net + siteMonitor: http://ollama-web.ollama:80 + statusStyle: dot + - AI Image: + icon: https://user-images.githubusercontent.com/36368048/196280761-1535f413-a91e-4b6a-af6a-b890f8ae204c.png + description: Stable Diffusion + href: https://stable-diffusion-pd05wd.boreal-beaufort.ts.net + siteMonitor: https://stable-diffusion-pd05wd.boreal-beaufort.ts.net + statusStyle: dot + - Search: + icon: sh-searxng.webp + description: Searxng + href: https://searxng.alexlebens.net/ + siteMonitor: http://searxng-browser.searxng:80 + statusStyle: dot + - Email: + icon: sh-roundcube.webp + 
description: Roundcube + href: https://mail.alexlebens.net + siteMonitor: http://roundcube.roundcube:80 + statusStyle: dot + - Wiki: + icon: sh-kiwix-light.webp + description: Kiwix + href: https://kiwix.alexlebens.net + siteMonitor: http://kiwix.kiwix:80 + statusStyle: dot + - Code: + - Code (Public): + icon: sh-gitea.webp + description: Gitea + href: https://gitea.alexlebens.dev + siteMonitor: https://gitea.alexlebens.dev + statusStyle: dot + - Code (Local): + icon: sh-gitea.webp + description: Gitea + href: https://gitea.alexlebens.net + siteMonitor: https://gitea.alexlebens.net + statusStyle: dot + - Code (ps10rp): + icon: sh-gitea.webp + description: Gitea + href: https://gitea-ps10rp.boreal-beaufort.ts.net + siteMonitor: https://gitea-ps10rp.boreal-beaufort.ts.net + statusStyle: dot + - IDE (Public): + icon: sh-visual-studio-code.webp + description: VS Code + href: https://codeserver.alexlebens.dev + siteMonitor: https://codeserver.alexlebens.dev + statusStyle: dot + - IDE (Home Assistant): + icon: sh-visual-studio-code.webp + description: Edit config for Home Assistant + href: https://home-assistant-code-server.alexlebens.net + siteMonitor: http://home-assistant-code-server.home-assistant:8443 + statusStyle: dot + - Continuous Deployment: + icon: sh-argo-cd.webp + description: ArgoCD + href: https://argocd.alexlebens.net + siteMonitor: http://argocd-server.argocd:80 + statusStyle: dot + - Docker Deployment: + icon: sh-komodo-light.webp + description: Komodo + href: https://komodo.alexlebens.net + siteMonitor: http://komodo-main.komodo:80 + statusStyle: dot + - Automation: + - Deployment Workflows: + icon: sh-argo-cd.webp + description: Argo Workflows + href: https://argo-workflows.alexlebens.net + siteMonitor: http://argo-workflows-server.argo-workflows:2746 + statusStyle: dot + - API Workflows: + icon: sh-n8n.webp + description: n8n + href: https://n8n.alexlebens.net + siteMonitor: http://n8n-main.n8n:80 + statusStyle: dot + - Jobs: + icon: https://raw.githubusercontent.com/mshade/kronic/main/static/android-chrome-192x192.png + description: Kronic + href: https://kronic.alexlebens.net + siteMonitor: http://kronic.kronic:80 + statusStyle: dot + - Uptime: + icon: sh-gatus.webp + description: Gatus + href: https://gatus.alexlebens.net + siteMonitor: http://gatus.gatus:80 + statusStyle: dot + - Tools: + icon: sh-omnitools.webp + description: OmniTools + href: https://omni-tools.alexlebens.net + siteMonitor: http://omni-tools.omni-tools:80 + statusStyle: dot + - Monitoring: + - Kubernetes: + icon: sh-headlamp.webp + description: Headlamp + href: https://headlamp.alexlebens.net + siteMonitor: http://headlamp.headlamp:80 + statusStyle: dot + - Network Monitoring: + icon: sh-cilium.webp + description: Hubble for Cilium + href: https://hubble.alexlebens.net + siteMonitor: http://hubble-ui.kube-system:80 + statusStyle: dot + - Dashboard: + icon: sh-grafana.webp + description: Grafana + href: https://grafana.alexlebens.net + siteMonitor: http://grafana-main-service.grafana-operator:3000/api/health + statusStyle: dot + - Metrics: + icon: sh-prometheus.webp + description: Prometheus + href: https://prometheus.alexlebens.net + siteMonitor: http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090 + statusStyle: dot + widget: + type: prometheus + url: http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090 + - Alerting: + icon: sh-prometheus-light.webp + description: Alertmanager + href: https://alertmanager.alexlebens.net + siteMonitor: 
http://kube-prometheus-stack-alertmanager.kube-prometheus-stack:9093 + statusStyle: dot + widget: + type: prometheusmetric + url: http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090 + refreshInterval: 120s + metrics: + - label: Alerts Active + query: alertmanager_alerts{state="active"} + - label: Metric Database Size + query: prometheus_tsdb_storage_blocks_bytes + format: + type: bytes + - Tautulli: + icon: sh-tautulli.webp + description: Plex Monitoring + href: https://tautulli.alexlebens.net + siteMonitor: http://tautulli.tautulli:80 + statusStyle: dot + - Jellystat: + icon: sh-jellystat.webp + description: Jellyfin Monitoring + href: https://jellystat.alexlebens.net + siteMonitor: http://jellystat.jellystat:80 + statusStyle: dot + - Services: + - Auth (Public): + icon: sh-authentik.webp + description: Authentik + href: https://auth.alexlebens.dev + siteMonitor: https://auth.alexlebens.dev + statusStyle: dot + - Auth (Local): + icon: sh-authentik.webp + description: Authentik + href: https://authentik.alexlebens.net + siteMonitor: http://authentik-server.authentik:80 + statusStyle: dot + - Email: + icon: sh-stalwart.webp + description: Stalwart + href: https://stalwart.alexlebens.net + siteMonitor: http://stalwart.stalwart:80 + statusStyle: dot + - Notifications: + icon: sh-ntfy.webp + description: ntfy + href: https://ntfy.alexlebens.net + siteMonitor: http://ntfy.ntfy:80 + statusStyle: dot + - Reverse Proxy: + icon: sh-traefik.webp + description: Traefik + href: https://traefik-cl01tl.alexlebens.net/dashboard/#/ + siteMonitor: https://traefik-cl01tl.alexlebens.net/dashboard/#/ + statusStyle: dot + widget: + type: traefik + url: https://traefik-cl01tl.alexlebens.net + - Image Cache: + icon: sh-harbor.webp + description: Harbor + href: https://harbor.alexlebens.net + siteMonitor: http://harbor-portal.harbor:80 + statusStyle: dot + - Hardware: + - Network Management (alexlebens.net): + icon: sh-ubiquiti-unifi.webp + description: Unifi + href: https://unifi.alexlebens.net + siteMonitor: https://unifi.alexlebens.net + statusStyle: dot + - Network Attached Storage: + icon: sh-synology-light.webp + description: Synology + href: https://synology.alexlebens.net + siteMonitor: https://synology.alexlebens.net + statusStyle: dot + widget: + type: diskstation + url: https://synology.alexlebens.net + username: {{HOMEPAGE_VAR_SYNOLOGY_USER}} + password: {{HOMEPAGE_VAR_SYNOLOGY_PASSWORD}} + volume: volume_2 + - TV Tuner: + icon: sh-hdhomerun.webp + description: HD Homerun + href: http://hdhr.alexlebens.net + siteMonitor: http://hdhr.alexlebens.net + statusStyle: dot + widget: + type: hdhomerun + url: http://hdhr.alexlebens.net + tuner: 0 + fields: ["channels", "hd"] + - KVM: + icon: sh-pikvm-light.webp + description: Pi KVM + href: https://pikvm.alexlebens.net + siteMonitor: https://pikvm.alexlebens.net + statusStyle: dot + - Server Plug: + icon: sh-shelly.webp + description: Shelly + href: http://it05sp.alexlebens.net + siteMonitor: http://it05sp.alexlebens.net + statusStyle: dot + - Storage: + - Cluster Storage: + icon: sh-ceph.webp + description: Ceph + href: https://ceph.alexlebens.net + siteMonitor: http://rook-ceph-mgr-dashboard.rook-ceph:7000 + statusStyle: dot + - Object Storage (NAS): + icon: sh-garage.webp + description: Garage + href: https://garage-webui.alexlebens.net + siteMonitor: http://garage-webui.garage:3909 + statusStyle: dot + - Object Storage (ps10rp): + icon: sh-garage.webp + description: Garage + href: https://garage-ui-ps10rp.boreal-beaufort.ts.net + 
siteMonitor: https://garage-ui-ps10rp.boreal-beaufort.ts.net + statusStyle: dot + - Database: + icon: sh-pgadmin-light.webp + description: PGAdmin + href: https://pgadmin.alexlebens.net + siteMonitor: http://pgadmin.pgadmin:80 + statusStyle: dot + - Database: + icon: sh-whodb.webp + description: WhoDB + href: https://whodb.alexlebens.net + siteMonitor: http://whodb.whodb:80 + statusStyle: dot + - Secrets: + icon: sh-hashicorp-vault.webp + description: Vault + href: https://vault.alexlebens.net + siteMonitor: http://vault.vault:8200 + statusStyle: dot + - Backups: + icon: sh-backrest-light.webp + description: Backrest + href: https://backrest.alexlebens.net + siteMonitor: http://backrest.backrest:80 + statusStyle: dot + - Content: + - qUI: + icon: https://raw.githubusercontent.com/autobrr/qui/8487c818886df9abb2b1456f43b54e0ba180a2bd/web/public/icons.svg + description: qbitorrent + href: https://qui.alexlebens.net + siteMonitor: http://qbittorrent-qui.qbittorrent:80 + statusStyle: dot + widget: + type: qbittorrent + url: http://qbittorrent.qbittorrent:8080 + enableLeechProgress: true + - Prowlarr: + icon: sh-prowlarr.webp + description: Indexers + href: https://prowlarr.alexlebens.net + siteMonitor: http://prowlarr.prowlarr:80 + statusStyle: dot + - Huntarr: + icon: https://raw.githubusercontent.com/plexguide/Huntarr.io/main/frontend/static/logo/128.png + description: Content upgrader + href: https://huntarr.alexlebens.net + siteMonitor: http://huntarr.huntarr:80 + statusStyle: dot + - Bazarr: + icon: sh-bazarr.webp + description: Subtitles + href: https://bazarr.alexlebens.net + siteMonitor: http://bazarr.bazarr:80 + statusStyle: dot + - Tdarr: + icon: sh-tdarr.webp + description: Media transcoding and health checks + href: https://tdarr.alexlebens.net + siteMonitor: http://tdarr-web.tdarr:8265 + statusStyle: dot + widget: + type: tdarr + url: http://tdarr-web.tdarr:8265 + - TV Shows: + - Sonarr: + icon: sh-sonarr.webp + description: TV Shows + href: https://sonarr.alexlebens.net + siteMonitor: http://sonarr.sonarr:80 + statusStyle: dot + widget: + type: sonarr + url: http://sonarr.sonarr:80 + key: {{HOMEPAGE_VAR_SONARR_KEY}} + fields: ["wanted", "queued", "series"] + enableQueue: false + - Sonarr 4K: + icon: sh-sonarr.webp + description: TV Shows 4K + href: https://sonarr-4k.alexlebens.net + siteMonitor: http://sonarr-4k.sonarr-4k:80 + statusStyle: dot + widget: + type: sonarr + url: http://sonarr-4k.sonarr-4k:80 + key: {{HOMEPAGE_VAR_SONARR4K_KEY}} + fields: ["wanted", "queued", "series"] + enableQueue: false + - Sonarr Anime: + icon: sh-sonarr.webp + description: Anime Shows + href: https://sonarr-anime.alexlebens.net + siteMonitor: http://sonarr-anime.sonarr-anime:80 + statusStyle: dot + widget: + type: sonarr + url: http://sonarr-anime.sonarr-anime:80 + key: {{HOMEPAGE_VAR_SONARRANIME_KEY}} + fields: ["wanted", "queued", "series"] + enableQueue: false + - Movies: + - Radarr: + icon: sh-radarr.webp + description: Movies + href: https://radarr.alexlebens.net + siteMonitor: http://radarr.radarr:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr.radarr:80 + key: {{HOMEPAGE_VAR_RADARR_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Radarr 4K: + icon: sh-radarr-4k.webp + description: Movies 4K + href: https://radarr-4k.alexlebens.net + siteMonitor: http://radarr-4k.radarr-4k:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr-4k.radarr-4k:80 + key: {{HOMEPAGE_VAR_RADARR4K_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: 
false + - Radarr Anime: + icon: sh-radarr-anime.webp + description: Anime Movies + href: https://radarr-anime.alexlebens.net + siteMonitor: http://radarr-anime.radarr-anime:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr-anime.radarr-anime:80 + key: {{HOMEPAGE_VAR_RADARRANIME_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Radarr Stand Up: + icon: sh-radarr-light-hybrid.webp + description: Stand Up + href: https://radarr-standup.alexlebens.net + siteMonitor: http://radarr-standup.radarr-standup:80 + statusStyle: dot + widget: + type: radarr + url: http://radarr-standup.radarr-standup:80 + key: {{HOMEPAGE_VAR_RADARRSTANDUP_KEY}} + fields: ["wanted", "queued", "movies"] + enableQueue: false + - Music: + - Lidarr: + icon: sh-lidarr.webp + description: Music + href: https://lidarr.alexlebens.net + siteMonitor: http://lidarr.lidarr:80 + statusStyle: dot + widget: + type: lidarr + url: http://lidarr.lidarr:80 + key: {{HOMEPAGE_VAR_LIDARR_KEY}} + fields: ["wanted", "queued", "artists"] + - LidaTube: + icon: sh-lidatube.webp + description: Searches for Music + href: https://lidatube.alexlebens.net + siteMonitor: http://lidatube.lidatube:80 + statusStyle: dot + - Soulseek: + icon: sh-slskd.webp + description: slskd + href: https://slskd.alexlebens.net + siteMonitor: http://slskd.slskd:5030 + statusStyle: dot + - Books: + - Ephemera: + icon: sh-ephemera.webp + description: Books + href: https://ephemera.alexlebens.net + siteMonitor: http://ephemera.ephemera:80 + statusStyle: dot + - Listenarr: + icon: sh-audiobookrequest.webp + description: Audiobooks + href: https://listenarr.alexlebens.net + siteMonitor: http://listenarr.listenarr:80 + statusStyle: dot + - Other Homes: + - Dev: + icon: sh-homepage.webp + description: Public Homepage + href: https://home.alexlebens.dev + siteMonitor: https://home.alexlebens.dev + statusStyle: dot + - Lebens Home: + icon: sh-homepage.webp + description: Lebens Homepage + href: https://home-ps10rp.boreal-beaufort.ts.net + siteMonitor: https://home-ps10rp.boreal-beaufort.ts.net + statusStyle: dot + settings.yaml: | + favicon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.svg + headerStyle: clean + hideVersion: true + color: zinc + background: + image: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/background.jpg + brightness: 50 + theme: dark + disableCollapse: true + layout: + - Media: + tab: Applications + icon: mdi-multimedia-#ffffff + - Public: + tab: Applications + icon: mdi-earth-#ffffff + - Internal: + tab: Applications + icon: mdi-security-network-#ffffff + - Code: + tab: Tools + icon: mdi-code-block-braces-#ffffff + - Automation: + tab: Tools + icon: mdi-wrench-#ffffff + - Monitoring: + tab: Tools + icon: mdi-chart-line-#ffffff + - Services: + tab: Services + icon: mdi-toolbox-outline-#ffffff + - Hardware: + tab: Services + icon: mdi-server-network-#ffffff + - Storage: + tab: Services + icon: mdi-database-#ffffff + - Content: + tab: Services + icon: mdi-multimedia-#ffffff + - TV Shows: + tab: Content + icon: mdi-television-#ffffff + - Movies: + tab: Content + icon: mdi-filmstrip-#ffffff + - Music: + tab: Content + icon: mdi-music-box-multiple-#ffffff + - Books: + tab: Content + icon: mdi-book-open-variant-#ffffff + - External Services: + tab: Bookmarks + icon: mdi-cloud-#ffffff + - Other Homes: + tab: Bookmarks + icon: mdi-cloud-#ffffff + - Trackers: + tab: Bookmarks + icon: mdi-cloud-#ffffff + widgets.yaml: | + - 
logo: + icon: https://web-assets-3bfcb5585cbd63dc365d32a3.nyc3.cdn.digitaloceanspaces.com/alexlebens-net/logo-new-round.png + - kubernetes: + cluster: + show: true + cpu: true + memory: true + showLabel: false + label: "Cluster" + nodes: + show: false + - datetime: + text_size: xl + format: + dateStyle: long + timeStyle: short + hour12: false + - openmeteo: + label: St. Paul + latitude: 44.954445 + longitude: -93.091301 + timezone: America/Chicago + units: metric + cache: 5 + format: + maximumFractionDigits: 0 +--- +# Source: homepage/templates/cluster-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: homepage + namespace: homepage + labels: + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - nodes + verbs: + - get + - list + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - traefik.io + resources: + - ingressroutes + verbs: + - get + - list + - apiGroups: + - gateway.networking.k8s.io + resources: + - httproutes + - gateways + verbs: + - get + - list + - apiGroups: + - metrics.k8s.io + resources: + - nodes + - pods + verbs: + - get + - list +--- +# Source: homepage/templates/cluster-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: homepage + namespace: homepage + labels: + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: homepage +subjects: + - kind: ServiceAccount + name: homepage + namespace: homepage +--- +# Source: homepage/charts/homepage/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: homepage + labels: + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + app.kubernetes.io/service: homepage + helm.sh/chart: homepage-4.4.0 + namespace: homepage +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage + app.kubernetes.io/name: homepage +--- +# Source: homepage/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: gitea-ps10rp + namespace: homepage + labels: + app.kubernetes.io/name: gitea-ps10rp + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage + annotations: + tailscale.com/tailnet-fqdn: gitea-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: homepage/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: home-ps10rp + namespace: homepage + labels: + app.kubernetes.io/name: home-ps10rp + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage + annotations: + tailscale.com/tailnet-fqdn: home-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: homepage/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: garage-ui-ps10rp + namespace: homepage + labels: + app.kubernetes.io/name: garage-ps10rp + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage + annotations: + tailscale.com/tailnet-fqdn: garage-ui-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: 
homepage/charts/homepage/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: homepage + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: homepage + helm.sh/chart: homepage-4.4.0 + annotations: + reloader.stakater.com/auto: "true" + namespace: homepage +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: homepage + app.kubernetes.io/instance: homepage + template: + metadata: + annotations: + checksum/configMaps: 5b025903635dfc4abfcdb07fac7674f70d46a2d7bbeeeb1c7cd95e68e03f53ea + checksum/secrets: d3ba83f111cd32f92c909268c55ad8bbd4f9e299b74b35b33c1a011180d8b378 + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: homepage + app.kubernetes.io/name: homepage + spec: + enableServiceLinks: false + serviceAccountName: homepage + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: HOMEPAGE_ALLOWED_HOSTS + value: home.alexlebens.net + envFrom: + - secretRef: + name: homepage-keys-secret + image: ghcr.io/gethomepage/homepage:v1.7.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /app/config/bookmarks.yaml + mountPropagation: None + name: config + readOnly: true + subPath: bookmarks.yaml + - mountPath: /app/config/docker.yaml + mountPropagation: None + name: config + readOnly: true + subPath: docker.yaml + - mountPath: /app/config/kubernetes.yaml + mountPropagation: None + name: config + readOnly: true + subPath: kubernetes.yaml + - mountPath: /app/config/services.yaml + mountPropagation: None + name: config + readOnly: true + subPath: services.yaml + - mountPath: /app/config/settings.yaml + mountPropagation: None + name: config + readOnly: true + subPath: settings.yaml + - mountPath: /app/config/widgets.yaml + mountPropagation: None + name: config + readOnly: true + subPath: widgets.yaml + volumes: + - configMap: + name: homepage + name: config +--- +# Source: homepage/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: homepage-keys-secret + namespace: homepage + labels: + app.kubernetes.io/name: homepage-keys-secret + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: HOMEPAGE_VAR_SYNOLOGY_USER + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /synology/auth/cl01tl + metadataPolicy: None + property: user + - secretKey: HOMEPAGE_VAR_SYNOLOGY_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /synology/auth/cl01tl + metadataPolicy: None + property: password + - secretKey: HOMEPAGE_VAR_UNIFI_USER + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /unifi/auth/cl01tl + metadataPolicy: None + property: user + - secretKey: HOMEPAGE_VAR_UNIFI_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /unifi/auth/cl01tl + metadataPolicy: None + property: password + - secretKey: HOMEPAGE_VAR_SONARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/sonarr4/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_SONARR4K_KEY + remoteRef: + 
conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/sonarr4-4k/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_SONARRANIME_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/sonarr4-anime/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARR4K_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5-4k/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARRANIME_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5-anime/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_RADARRSTANDUP_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/radarr5-standup/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_LIDARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/lidarr2/key + metadataPolicy: None + property: key + - secretKey: HOMEPAGE_VAR_PROWLARR_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/prowlarr/key + metadataPolicy: None + property: key +--- +# Source: homepage/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-homepage + namespace: homepage + labels: + app.kubernetes.io/name: http-route-homepage + app.kubernetes.io/instance: homepage + app.kubernetes.io/part-of: homepage +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - home.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: homepage + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/jellystat/jellystat.yaml b/clusters/cl01tl/manifests/jellystat/jellystat.yaml new file mode 100644 index 000000000..96df352ba --- /dev/null +++ b/clusters/cl01tl/manifests/jellystat/jellystat.yaml @@ -0,0 +1,861 @@ +--- +# Source: jellystat/charts/jellystat/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: jellystat-data + labels: + app.kubernetes.io/instance: jellystat + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellystat + helm.sh/chart: jellystat-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: jellystat +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: jellystat/charts/jellystat/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: jellystat + labels: + app.kubernetes.io/instance: jellystat + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: jellystat + app.kubernetes.io/service: jellystat + helm.sh/chart: jellystat-4.4.0 + namespace: jellystat +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellystat + app.kubernetes.io/name: jellystat +--- +# Source: jellystat/charts/jellystat/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jellystat + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellystat + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/name: jellystat + helm.sh/chart: jellystat-4.4.0 + namespace: jellystat +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: jellystat + app.kubernetes.io/instance: jellystat + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: jellystat + app.kubernetes.io/name: jellystat + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: JWT_SECRET + valueFrom: + secretKeyRef: + key: secret-key + name: jellystat-secret + - name: JS_USER + valueFrom: + secretKeyRef: + key: user + name: jellystat-secret + - name: JS_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: jellystat-secret + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + key: username + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_DB + valueFrom: + secretKeyRef: + key: dbname + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_IP + valueFrom: + secretKeyRef: + key: host + name: jellystat-postgresql-17-cluster-app + - name: POSTGRES_PORT + valueFrom: + secretKeyRef: + key: port + name: jellystat-postgresql-17-cluster-app + image: cyfershepard/jellystat:1.1.6 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /app/backend/backup-data + name: data + volumes: + - name: data + persistentVolumeClaim: + claimName: jellystat-data +--- +# Source: jellystat/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: jellystat-postgresql-17-cluster + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "jellystat-postgresql-17-external-backup" + serverName: "jellystat-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "jellystat-postgresql-17-garage-local-backup" + serverName: "jellystat-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "jellystat-postgresql-17-recovery" + serverName: jellystat-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB 
+ shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: jellystat-postgresql-17-backup-1 + + externalClusters: + - name: jellystat-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "jellystat-postgresql-17-recovery" + serverName: jellystat-postgresql-17-backup-1 +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-secret + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-secret + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: secret-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/jellystat/auth + metadataPolicy: None + property: secret-key + - secretKey: user + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/jellystat/auth + metadataPolicy: None + property: user + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/jellystat/auth + metadataPolicy: None + property: password +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-data-backup-secret + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-data-backup-secret + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/jellystat/jellystat-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-postgresql-17-cluster-backup-secret + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + 
decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: jellystat/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: jellystat-postgresql-17-cluster-backup-secret-garage + namespace: jellystat + labels: + app.kubernetes.io/name: jellystat-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: jellystat/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-jellystat + namespace: jellystat + labels: + app.kubernetes.io/name: http-route-jellystat + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - jellystat.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: jellystat + port: 80 + weight: 100 +--- +# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "jellystat-postgresql-17-external-backup" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/jellystat/jellystat-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: jellystat-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: jellystat-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "jellystat-postgresql-17-garage-local-backup" + namespace: jellystat + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: jellystat-postgresql-17 + app.kubernetes.io/instance: jellystat + app.kubernetes.io/part-of: jellystat + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: jellystat-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: jellystat-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + 
name: jellystat-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_REGION
+---
+# Source: jellystat/charts/postgres-17-cluster/templates/object-store.yaml
+apiVersion: barmancloud.cnpg.io/v1
+kind: ObjectStore
+metadata:
+  name: "jellystat-postgresql-17-recovery"
+  namespace: jellystat
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: jellystat-postgresql-17
+    app.kubernetes.io/instance: jellystat
+    app.kubernetes.io/part-of: jellystat
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  configuration:
+    destinationPath: s3://postgres-backups/cl01tl/jellystat/jellystat-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    wal:
+      compression: snappy
+      maxParallel: 1
+    data:
+      compression: snappy
+      jobs: 1
+    s3Credentials:
+      accessKeyId:
+        name: jellystat-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: jellystat-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+---
+# Source: jellystat/charts/postgres-17-cluster/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: jellystat-postgresql-17-alert-rules
+  namespace: jellystat
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: jellystat-postgresql-17
+    app.kubernetes.io/instance: jellystat
+    app.kubernetes.io/part-of: jellystat
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/jellystat-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at a severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the main instance.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="jellystat"} - cnpg_pg_replication_is_wal_receiver_up{namespace="jellystat"}) < 2
+          for: 5m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsCritical
+          annotations:
+            summary: CNPG Instance maximum number of connections critical!
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
+          for: 5m
+          labels:
+            severity: critical
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsWarning
+          annotations:
+            summary: CNPG Instance is approaching the maximum number of connections.
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
+          expr: |
+            sum by (pod) (cnpg_backends_total{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
+          for: 5m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterHighReplicationLag
+          annotations:
+            summary: CNPG Cluster high replication lag
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is experiencing a high replication lag of
+              {{`{{`}} $value {{`}}`}}ms.
+ + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: jellystat + cnpg_cluster: jellystat-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. 
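+            # Note (editor): same expression shape as the critical rule above, with
+            # a 70% usage threshold (0.7) instead of 90%, evaluated separately for
+            # the data, -wal, and -tbs PVCs.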
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
+          expr: |
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
+            max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            /
+            sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="jellystat", persistentvolumeclaim=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            *
+            on(namespace, persistentvolumeclaim) group_left(volume)
+            kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}
+            ) > 0.7
+          for: 5m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterOffline
+          annotations:
+            summary: CNPG Cluster has no running instances!
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has no ready instances.
+
+              Having an offline cluster means your applications will not be able to access the database, leading to
+              potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="jellystat",pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
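+            # Note (editor): the expression below flags standbys that report being
+            # in recovery while their WAL receiver is not running, i.e. a replica
+            # that exists but is not actually streaming from the primary.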
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same zone.
+            description: |-
+              CloudNativePG Cluster "jellystat/jellystat-postgresql-17-cluster" has instances in the same availability zone.
+
+              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
+          expr: |
+            3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="jellystat", pod=~"jellystat-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
+          for: 5m
+          labels:
+            severity: warning
+            namespace: jellystat
+            cnpg_cluster: jellystat-postgresql-17-cluster
+---
+# Source: jellystat/templates/replication-source.yaml
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+  name: jellystat-data-backup-source
+  namespace: jellystat
+  labels:
+    app.kubernetes.io/name: jellystat-data-backup-source
+    app.kubernetes.io/instance: jellystat
+    app.kubernetes.io/part-of: jellystat
+spec:
+  sourcePVC: jellystat-data
+  trigger:
+    schedule: 0 4 * * *
+  restic:
+    pruneIntervalDays: 7
+    repository: jellystat-data-backup-secret
+    retain:
+      hourly: 1
+      daily: 3
+      weekly: 2
+      monthly: 2
+      yearly: 4
+    copyMethod: Snapshot
+    storageClassName: ceph-block
+    volumeSnapshotClassName: ceph-blockpool-snapshot
+---
+# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: ScheduledBackup
+metadata:
+  name: "jellystat-postgresql-17-daily-backup-scheduled-backup"
+  namespace: jellystat
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: jellystat-postgresql-17
+    app.kubernetes.io/instance: jellystat
+    app.kubernetes.io/part-of: jellystat
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  immediate: false
+  suspend: false
+  schedule: "0 0 0 * * *"
+  backupOwnerReference: self
+  cluster:
+    name: jellystat-postgresql-17-cluster
+  method: plugin
+  pluginConfiguration:
+    name: barman-cloud.cloudnative-pg.io
+    parameters:
+      barmanObjectName: "jellystat-postgresql-17-external-backup"
+---
+# Source: jellystat/charts/postgres-17-cluster/templates/scheduled-backup.yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: ScheduledBackup
+metadata:
+  name: "jellystat-postgresql-17-live-backup-scheduled-backup"
+  namespace: jellystat
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: jellystat-postgresql-17
+    app.kubernetes.io/instance: jellystat
+    app.kubernetes.io/part-of: jellystat
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  immediate: true
+  suspend: false
+  schedule: "0 0 0 * * *"
+  backupOwnerReference: self
+  cluster:
+    name: jellystat-postgresql-17-cluster
+  method: plugin
+  pluginConfiguration:
+    name: barman-cloud.cloudnative-pg.io
+    parameters:
+      barmanObjectName: "jellystat-postgresql-17-garage-local-backup"
diff --git a/clusters/cl01tl/manifests/lidatube/lidatube.yaml b/clusters/cl01tl/manifests/lidatube/lidatube.yaml
new file mode 100644
index 000000000..d9eb9fdc7
--- /dev/null
+++
b/clusters/cl01tl/manifests/lidatube/lidatube.yaml @@ -0,0 +1,221 @@ +--- +# Source: lidatube/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: lidatube-nfs-storage + namespace: lidatube + labels: + app.kubernetes.io/name: lidatube-nfs-storage + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Music + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: lidatube/charts/lidatube/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: lidatube-config + labels: + app.kubernetes.io/instance: lidatube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidatube + helm.sh/chart: lidatube-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: lidatube +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: lidatube/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lidatube-nfs-storage + namespace: lidatube + labels: + app.kubernetes.io/name: lidatube-nfs-storage + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + volumeName: lidatube-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: lidatube/charts/lidatube/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: lidatube + labels: + app.kubernetes.io/instance: lidatube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidatube + app.kubernetes.io/service: lidatube + helm.sh/chart: lidatube-4.4.0 + namespace: lidatube +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidatube + app.kubernetes.io/name: lidatube +--- +# Source: lidatube/charts/lidatube/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lidatube + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidatube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: lidatube + helm.sh/chart: lidatube-4.4.0 + namespace: lidatube +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: lidatube + app.kubernetes.io/instance: lidatube + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: lidatube + app.kubernetes.io/name: lidatube + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: PUID + value: "1000" + - name: PGID + value: "1000" + - name: lidarr_address + value: http://lidarr.lidarr:80 + - name: lidarr_api_key + valueFrom: + secretKeyRef: + key: lidarr_api_key + name: lidatube-secret + - name: sleep_interval + value: "360" + - name: sync_schedule + value: "4" + - name: attempt_lidarr_import + value: "true" + image: thewicklowwolf/lidatube:0.2.41 + imagePullPolicy: IfNotPresent + name: main + resources: + 
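+            # Note (editor): only resource requests are set, no limits, so this
+            # pod runs in the Burstable QoS class.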
requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /lidatube/config + name: config + - mountPath: /lidatube/downloads + name: music + volumes: + - name: config + persistentVolumeClaim: + claimName: lidatube-config + - name: music + persistentVolumeClaim: + claimName: lidatube-nfs-storage +--- +# Source: lidatube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: lidatube-secret + namespace: lidatube + labels: + app.kubernetes.io/name: lidatube-secret + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: lidarr_api_key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/lidarr2/key + metadataPolicy: None + property: key +--- +# Source: lidatube/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-lidatube + namespace: lidatube + labels: + app.kubernetes.io/name: http-route-lidatube + app.kubernetes.io/instance: lidatube + app.kubernetes.io/part-of: lidatube +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - lidatube.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: lidatube + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/listenarr/listenarr.yaml b/clusters/cl01tl/manifests/listenarr/listenarr.yaml new file mode 100644 index 000000000..001012d3a --- /dev/null +++ b/clusters/cl01tl/manifests/listenarr/listenarr.yaml @@ -0,0 +1,180 @@ +--- +# Source: listenarr/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: listenarr-nfs-storage + namespace: listenarr + labels: + app.kubernetes.io/name: listenarr-nfs-storage + app.kubernetes.io/instance: listenarr + app.kubernetes.io/part-of: listenarr +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Audiobooks + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: listenarr/charts/listenarr/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: listenarr + labels: + app.kubernetes.io/instance: listenarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: listenarr + helm.sh/chart: listenarr-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: listenarr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: listenarr/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: listenarr-nfs-storage + namespace: listenarr + labels: + app.kubernetes.io/name: listenarr-nfs-storage + app.kubernetes.io/instance: listenarr + app.kubernetes.io/part-of: listenarr +spec: + volumeName: listenarr-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: listenarr/charts/listenarr/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: listenarr + labels: + app.kubernetes.io/instance: listenarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: listenarr + app.kubernetes.io/service: listenarr + helm.sh/chart: listenarr-4.4.0 + 
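+  # Note (editor): listenarr follows the same layout as lidatube above: a static
+  # NFS PersistentVolume pinned from the PVC via volumeName, a ClusterIP Service
+  # mapping port 80 to 5000, and an HTTPRoute through traefik-gateway.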
namespace: listenarr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: listenarr + app.kubernetes.io/name: listenarr +--- +# Source: listenarr/charts/listenarr/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: listenarr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: listenarr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: listenarr + helm.sh/chart: listenarr-4.4.0 + namespace: listenarr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: listenarr + app.kubernetes.io/instance: listenarr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: listenarr + app.kubernetes.io/name: listenarr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: LISTENARR_PUBLIC_URL + value: https://listenarr.alexlebens.net + image: therobbiedavis/listenarr:canary-0.2.35 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 128Mi + volumeMounts: + - mountPath: /app/config + name: config + - mountPath: /data + name: media + volumes: + - name: config + persistentVolumeClaim: + claimName: listenarr + - name: media + persistentVolumeClaim: + claimName: listenarr-nfs-storage +--- +# Source: listenarr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-listenarr + namespace: listenarr + labels: + app.kubernetes.io/name: http-route-listenarr + app.kubernetes.io/instance: listenarr + app.kubernetes.io/part-of: listenarr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - listenarr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: listenarr + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/omni-tools/omni-tools.yaml b/clusters/cl01tl/manifests/omni-tools/omni-tools.yaml new file mode 100644 index 000000000..03b280a74 --- /dev/null +++ b/clusters/cl01tl/manifests/omni-tools/omni-tools.yaml @@ -0,0 +1,100 @@ +--- +# Source: omni-tools/charts/omni-tools/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: omni-tools + labels: + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: omni-tools + app.kubernetes.io/service: omni-tools + helm.sh/chart: omni-tools-4.4.0 + namespace: omni-tools +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/name: omni-tools +--- +# Source: omni-tools/charts/omni-tools/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: omni-tools + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: omni-tools + helm.sh/chart: omni-tools-4.4.0 + namespace: omni-tools +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + 
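+      # Note (editor): these selector labels are immutable after creation and
+      # must match the pod template labels below.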
app.kubernetes.io/controller: main + app.kubernetes.io/name: omni-tools + app.kubernetes.io/instance: omni-tools + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/name: omni-tools + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - image: iib0011/omni-tools:0.6.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 50m + memory: 512Mi +--- +# Source: omni-tools/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-omni-tools + namespace: omni-tools + labels: + app.kubernetes.io/name: http-route-omni-tools + app.kubernetes.io/instance: omni-tools + app.kubernetes.io/part-of: omni-tools +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - omni-tools.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: omni-tools + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/outline/outline.yaml b/clusters/cl01tl/manifests/outline/outline.yaml new file mode 100644 index 000000000..8334e69b2 --- /dev/null +++ b/clusters/cl01tl/manifests/outline/outline.yaml @@ -0,0 +1,988 @@ +--- +# Source: outline/charts/outline/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: outline + labels: + app.kubernetes.io/instance: outline + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: outline + app.kubernetes.io/service: outline + helm.sh/chart: outline-4.4.0 + namespace: outline +spec: + type: ClusterIP + ports: + - port: 3000 + targetPort: 3000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/name: outline +--- +# Source: outline/charts/cloudflared-outline/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: outline-cloudflared-outline + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-outline + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-outline-1.23.0 + namespace: outline +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-outline + app.kubernetes.io/instance: outline + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/name: cloudflared-outline + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: outline-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: outline/charts/outline/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: outline + labels: + 
app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: outline + helm.sh/chart: outline-4.4.0 + namespace: outline +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: outline + app.kubernetes.io/instance: outline + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: outline + app.kubernetes.io/name: outline + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: NODE_ENV + value: production + - name: URL + value: https://wiki.alexlebens.dev + - name: PORT + value: "3000" + - name: SECRET_KEY + valueFrom: + secretKeyRef: + key: secret-key + name: outline-key-secret + - name: UTILS_SECRET + valueFrom: + secretKeyRef: + key: utils-key + name: outline-key-secret + - name: POSTGRES_USERNAME + valueFrom: + secretKeyRef: + key: username + name: outline-postgresql-17-cluster-app + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: outline-postgresql-17-cluster-app + - name: POSTGRES_DATABASE_NAME + valueFrom: + secretKeyRef: + key: dbname + name: outline-postgresql-17-cluster-app + - name: POSTGRES_DATABASE_HOST + valueFrom: + secretKeyRef: + key: host + name: outline-postgresql-17-cluster-app + - name: POSTGRES_DATABASE_PORT + valueFrom: + secretKeyRef: + key: port + name: outline-postgresql-17-cluster-app + - name: DATABASE_URL + value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME) + - name: DATABASE_URL_TEST + value: postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test + - name: DATABASE_CONNECTION_POOL_MIN + value: "2" + - name: DATABASE_CONNECTION_POOL_MAX + value: "20" + - name: PGSSLMODE + value: disable + - name: REDIS_URL + value: redis://redis-replication-outline-master.outline:6379 + - name: FILE_STORAGE + value: s3 + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: ceph-bucket-outline + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: ceph-bucket-outline + - name: AWS_REGION + value: us-east-1 + - name: AWS_S3_UPLOAD_BUCKET_NAME + valueFrom: + configMapKeyRef: + key: BUCKET_NAME + name: ceph-bucket-outline + - name: AWS_S3_UPLOAD_BUCKET_URL + value: https://objects.alexlebens.dev + - name: AWS_S3_FORCE_PATH_STYLE + value: "true" + - name: AWS_S3_ACL + value: private + - name: FILE_STORAGE_UPLOAD_MAX_SIZE + value: "26214400" + - name: FORCE_HTTPS + value: "false" + - name: ENABLE_UPDATES + value: "false" + - name: WEB_CONCURRENCY + value: "1" + - name: FILE_STORAGE_IMPORT_MAX_SIZE + value: "5.12e+06" + - name: LOG_LEVEL + value: info + - name: DEFAULT_LANGUAGE + value: en_US + - name: RATE_LIMITER_ENABLED + value: "false" + - name: DEVELOPMENT_UNSAFE_INLINE_CSP + value: "false" + - name: OIDC_CLIENT_ID + valueFrom: + secretKeyRef: + key: client + name: outline-oidc-secret + - name: OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: secret + name: outline-oidc-secret + - name: OIDC_AUTH_URI + value: https://auth.alexlebens.dev/application/o/authorize/ + - name: OIDC_TOKEN_URI + value: 
https://auth.alexlebens.dev/application/o/token/ + - name: OIDC_USERINFO_URI + value: https://auth.alexlebens.dev/application/o/userinfo/ + - name: OIDC_USERNAME_CLAIM + value: email + - name: OIDC_DISPLAY_NAME + value: Authentik + - name: OIDC_SCOPES + value: openid profile email + image: outlinewiki/outline:1.1.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 512Mi +--- +# Source: outline/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: outline-postgresql-17-cluster + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "outline-postgresql-17-external-backup" + serverName: "outline-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "outline-postgresql-17-garage-local-backup" + serverName: "outline-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "outline-postgresql-17-recovery" + serverName: outline-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: outline-postgresql-17-backup-1 + + externalClusters: + - name: outline-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "outline-postgresql-17-recovery" + serverName: outline-postgresql-17-backup-1 +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-key-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-key-secret + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: secret-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/outline/key + metadataPolicy: None + property: secret-key + - secretKey: utils-key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/outline/key + metadataPolicy: None + property: utils-key +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-oidc-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-oidc-secret + 
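+    # Note (editor): like the key secret above, this is materialized from the
+    # vault ClusterSecretStore; the OIDC client id and secret map to the
+    # /authentik/oidc/outline entry.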
app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: client + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/outline + metadataPolicy: None + property: client + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/outline + metadataPolicy: None + property: secret +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-cloudflared-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-cloudflared-secret + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/outline + metadataPolicy: None + property: token +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-postgresql-17-cluster-backup-secret + namespace: outline + labels: + app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: outline/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: outline-postgresql-17-cluster-backup-secret-garage + namespace: outline + labels: + app.kubernetes.io/name: outline-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: outline/templates/object-bucket-claim.yaml +apiVersion: objectbucket.io/v1alpha1 +kind: ObjectBucketClaim +metadata: + name: ceph-bucket-outline + labels: + app.kubernetes.io/name: ceph-bucket-outline + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + generateBucketName: bucket-outline + storageClassName: ceph-bucket + additionalConfig: + bucketPolicy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor", + "Effect": "Allow", + "Action": [ + "s3:GetObjectAcl", + "s3:DeleteObject", + "s3:PutObject", + "s3:GetObject", + "s3:PutObjectAcl" + ], + "Resource": 
"arn:aws:s3:::bucket-outline-630c57e0-d475-4d78-926c-c1c082291d73/*" + } + ] + } +--- +# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "outline-postgresql-17-external-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/outline/outline-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: outline-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: outline-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "outline-postgresql-17-garage-local-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: outline/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "outline-postgresql-17-recovery" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/outline/outline-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: outline-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: outline/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: outline-postgresql-17-alert-rules + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/outline-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster a backend is waiting for longer than 5 minutes. 
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+
+              This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
+              In this case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="outline"} - cnpg_pg_replication_is_wal_receiver_up{namespace="outline"}) < 2
+          for: 5m
+          labels:
+            severity: warning
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterHighConnectionsCritical
+          annotations:
+            summary: CNPG Instance maximum number of connections critical!
+            description: |-
+              CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
+              the maximum number of connections.
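+            # Note (editor): for example, with max_connections=100 this fires above
+            # 95 active backends (> 95%); the warning variant below fires above 80%.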
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! 
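+            # Note (editor): 0.9 means the alert fires once a PVC is more than 90%
+            # full; the data, -wal, and -tbs volumes are each checked separately.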
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="outline", persistentvolumeclaim=~"outline-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
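+            # Note (editor): the `OR on() vector(0)` fallback defaults the count to
+            # 0 when no instance metrics are scraped at all, so the alert also fires
+            # when every pod is gone rather than returning no data.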
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="outline",pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: outline
+            cnpg_cluster: outline-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same zone.
+            description: |-
+              CloudNativePG Cluster "outline/outline-postgresql-17-cluster" has instances in the same availability zone.
+
+              A disaster in one availability zone will lead to a potential service disruption and/or data loss.
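+            # Note (editor): fires when the cluster's instances span fewer than 3
+            # distinct topology zones (zone labels joined in from kube_node_labels).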
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="outline", pod=~"outline-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: outline + cnpg_cluster: outline-postgresql-17-cluster +--- +# Source: outline/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-outline + namespace: outline + labels: + app.kubernetes.io/name: redis-replication-outline + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "outline-postgresql-17-daily-backup-scheduled-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: outline-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "outline-postgresql-17-external-backup" +--- +# Source: outline/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "outline-postgresql-17-live-backup-scheduled-backup" + namespace: outline + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: outline-postgresql-17 + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: outline-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "outline-postgresql-17-garage-local-backup" +--- +# Source: outline/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-outline + namespace: outline + labels: + app.kubernetes.io/name: redis-replication-outline + app.kubernetes.io/instance: outline + app.kubernetes.io/part-of: outline + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/overseerr/overseerr.yaml b/clusters/cl01tl/manifests/overseerr/overseerr.yaml new file mode 100644 index 000000000..3e45d4702 --- /dev/null +++ 
b/clusters/cl01tl/manifests/overseerr/overseerr.yaml @@ -0,0 +1,215 @@ +--- +# Source: overseerr/charts/app-template/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: overseerr-main + labels: + app.kubernetes.io/instance: overseerr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: overseerr + helm.sh/chart: app-template-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: overseerr +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: overseerr/charts/app-template/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: overseerr + labels: + app.kubernetes.io/instance: overseerr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: overseerr + app.kubernetes.io/service: overseerr + helm.sh/chart: app-template-4.4.0 + namespace: overseerr +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5055 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: overseerr + app.kubernetes.io/name: overseerr +--- +# Source: overseerr/charts/app-template/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: overseerr + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: overseerr + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: overseerr + helm.sh/chart: app-template-4.4.0 + namespace: overseerr +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: overseerr + app.kubernetes.io/instance: overseerr + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: overseerr + app.kubernetes.io/name: overseerr + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + image: ghcr.io/sct/overseerr:1.34.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 512Mi + volumeMounts: + - mountPath: /app/config + name: main + volumes: + - name: main + persistentVolumeClaim: + claimName: overseerr-main +--- +# Source: overseerr/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: overseerr-main-backup-secret + namespace: overseerr + labels: + app.kubernetes.io/name: overseerr-main-backup-secret + app.kubernetes.io/instance: overseerr + app.kubernetes.io/part-of: overseerr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/overseerr/overseerr-main" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: 
Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: overseerr/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-overseerr + namespace: overseerr + labels: + app.kubernetes.io/name: http-route-overseerr + app.kubernetes.io/instance: overseerr + app.kubernetes.io/part-of: overseerr +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - overseerr.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: overseerr + port: 80 + weight: 100 +--- +# Source: overseerr/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: overseerr-main-backup-source + namespace: overseerr + labels: + app.kubernetes.io/name: overseerr-main-backup-source + app.kubernetes.io/instance: overseerr + app.kubernetes.io/part-of: overseerr +spec: + sourcePVC: overseerr-main + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: overseerr-main-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot diff --git a/clusters/cl01tl/manifests/photoview/photoview.yaml b/clusters/cl01tl/manifests/photoview/photoview.yaml new file mode 100644 index 000000000..6ae0d2dc8 --- /dev/null +++ b/clusters/cl01tl/manifests/photoview/photoview.yaml @@ -0,0 +1,773 @@ +--- +# Source: photoview/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: photoview-nfs-storage + namespace: photoview + labels: + app.kubernetes.io/name: photoview-nfs-storage + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage/Pictures + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: photoview/charts/photoview/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: photoview-cache + labels: + app.kubernetes.io/instance: photoview + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: photoview + helm.sh/chart: photoview-4.4.0 + namespace: photoview +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: photoview/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: photoview-nfs-storage + namespace: photoview + labels: + app.kubernetes.io/name: photoview-nfs-storage + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + volumeName: photoview-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: photoview/charts/photoview/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: photoview + labels: + app.kubernetes.io/instance: photoview + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/name: photoview + app.kubernetes.io/service: photoview + helm.sh/chart: photoview-4.4.0 + namespace: photoview +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: photoview + app.kubernetes.io/name: photoview +--- +# Source: photoview/charts/photoview/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: photoview + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: photoview + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: photoview + helm.sh/chart: photoview-4.4.0 + namespace: photoview +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: photoview + app.kubernetes.io/instance: photoview + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: photoview + app.kubernetes.io/name: photoview + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + initContainers: + - command: + - /bin/sh + - -ec + - | + /bin/chown -R 999:999 /app/cache + image: busybox:1.37.0 + imagePullPolicy: IfNotPresent + name: init-chmod-data + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /app/cache + name: cache + containers: + - env: + - name: PHOTOVIEW_DATABASE_DRIVER + value: postgres + - name: PHOTOVIEW_POSTGRES_URL + valueFrom: + secretKeyRef: + key: uri + name: photoview-postgresql-17-cluster-app + - name: PHOTOVIEW_MEDIA_CACHE + value: /app/cache + image: photoview/photoview:2.4.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 512Mi + volumeMounts: + - mountPath: /app/cache + name: cache + - mountPath: /photos + name: media + readOnly: true + volumes: + - name: cache + persistentVolumeClaim: + claimName: photoview-cache + - name: media + persistentVolumeClaim: + claimName: photoview-nfs-storage +--- +# Source: photoview/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: photoview-postgresql-17-cluster + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "photoview-postgresql-17-external-backup" + serverName: "photoview-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "photoview-postgresql-17-garage-local-backup" + serverName: "photoview-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "photoview-postgresql-17-recovery" + serverName: photoview-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: 
local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: photoview-postgresql-17-backup-1 + + externalClusters: + - name: photoview-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "photoview-postgresql-17-recovery" + serverName: photoview-postgresql-17-backup-1 +--- +# Source: photoview/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: photoview-postgresql-17-cluster-backup-secret + namespace: photoview + labels: + app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: photoview/templates/external-secrets.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: photoview-postgresql-17-cluster-backup-secret-garage + namespace: photoview + labels: + app.kubernetes.io/name: photoview-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: photoview/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-photoview + namespace: photoview + labels: + app.kubernetes.io/name: http-route-photoview + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - photoview.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: photoview + port: 80 + weight: 100 +--- +# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "photoview-postgresql-17-external-backup" + namespace: 
photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/photoview/photoview-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: photoview-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: photoview-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "photoview-postgresql-17-garage-local-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: photoview/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "photoview-postgresql-17-recovery" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/photoview/photoview-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: photoview-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: photoview/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: photoview-postgresql-17-alert-rules + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/photoview-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
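+          # Note: the threshold in the expr below (cnpg_backends_waiting_total > 300) corresponds to the 5 minutes (300 s) referenced in the summary above.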
+ description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes. + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }}. + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 1 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="photoview"} - cnpg_pg_replication_is_wal_receiver_up{namespace="photoview"}) < 2 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections.
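+          # Illustrative reading of the expr below, with hypothetical numbers: max_connections=100 and 96 busy backends give 96/100*100 = 96 > 95, so the alert fires after 5m.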
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! 
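+          # The expr below ORs three checks, one per PVC group (data, -wal, and -tbs* tablespaces); for example, 9.2Gi used of a 10Gi PVC is a 0.92 fill ratio, above the 0.9 critical threshold.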
+ description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="photoview", persistentvolumeclaim=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has no ready instances. 
+ + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="photoview",pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }}. + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes). + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate. + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster has instances in the same zone. + description: |- + CloudNativePG Cluster "photoview/photoview-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss.
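+          # Reading the expr below: the inner count by zone gives the number of distinct zones in use; e.g. all 3 instances in one zone yields 1, and 3 > 1 fires the warning.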
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="photoview", pod=~"photoview-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: photoview + cnpg_cluster: photoview-postgresql-17-cluster +--- +# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "photoview-postgresql-17-daily-backup-scheduled-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: photoview-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "photoview-postgresql-17-external-backup" +--- +# Source: photoview/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "photoview-postgresql-17-live-backup-scheduled-backup" + namespace: photoview + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: photoview-postgresql-17 + app.kubernetes.io/instance: photoview + app.kubernetes.io/part-of: photoview + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: photoview-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "photoview-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/plex/plex.yaml b/clusters/cl01tl/manifests/plex/plex.yaml new file mode 100644 index 000000000..a05f2bcc7 --- /dev/null +++ b/clusters/cl01tl/manifests/plex/plex.yaml @@ -0,0 +1,190 @@ +--- +# Source: plex/templates/persistent-volume.yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: plex-nfs-storage + namespace: plex + labels: + app.kubernetes.io/name: plex-nfs-storage + app.kubernetes.io/instance: plex + app.kubernetes.io/part-of: plex +spec: + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs-client + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + path: /volume2/Storage + server: synologybond.alexlebens.net + mountOptions: + - vers=4 + - minorversion=1 + - noac +--- +# Source: plex/charts/plex/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: plex-config + labels: + app.kubernetes.io/instance: plex + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: plex + helm.sh/chart: plex-4.4.0 + namespace: plex +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "100Gi" + storageClassName: "ceph-block" +--- +# Source: plex/templates/persistent-volume-claim.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: plex-nfs-storage + namespace: plex + labels: + app.kubernetes.io/name: plex-nfs-storage + app.kubernetes.io/instance: plex + app.kubernetes.io/part-of: plex +spec: 
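+  # Note: volumeName below statically binds this claim to the pre-created plex-nfs-storage PersistentVolume above, rather than dynamically provisioning a new volume.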
+ volumeName: plex-nfs-storage + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi +--- +# Source: plex/charts/plex/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: plex + labels: + app.kubernetes.io/instance: plex + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: plex + app.kubernetes.io/service: plex + helm.sh/chart: plex-4.4.0 + namespace: plex +spec: + type: LoadBalancer + ports: + - port: 32400 + targetPort: 32400 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: plex + app.kubernetes.io/name: plex +--- +# Source: plex/charts/plex/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: plex + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: plex + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: plex + helm.sh/chart: plex-4.4.0 + namespace: plex +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: plex + app.kubernetes.io/instance: plex + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: plex + app.kubernetes.io/name: plex + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: TZ + value: US/Central + - name: VERSION + value: docker + - name: PLEX_CLAIM + value: claim-XmGK2o9x54PbCzQaqj-J + image: ghcr.io/linuxserver/plex:1.42.2@sha256:ab81c7313fb5dc4d1f9562e5bbd5e5877a8a3c5ca6b9f9fff3437b5096a2b123 + imagePullPolicy: IfNotPresent + name: main + resources: + limits: + gpu.intel.com/i915: 1 + requests: + cpu: 10m + gpu.intel.com/i915: 1 + memory: 512Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /mnt/store + name: media + readOnly: true + - mountPath: /transcode + name: transcode + volumes: + - name: config + persistentVolumeClaim: + claimName: plex-config + - name: media + persistentVolumeClaim: + claimName: plex-nfs-storage + - emptyDir: {} + name: transcode +--- +# Source: plex/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-plex + namespace: plex + labels: + app.kubernetes.io/name: http-route-plex + app.kubernetes.io/instance: plex + app.kubernetes.io/part-of: plex +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - plex.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: plex + port: 32400 + weight: 100 diff --git a/clusters/cl01tl/manifests/postiz/postiz.yaml b/clusters/cl01tl/manifests/postiz/postiz.yaml new file mode 100644 index 000000000..767db4ca6 --- /dev/null +++ b/clusters/cl01tl/manifests/postiz/postiz.yaml @@ -0,0 +1,1180 @@ +--- +# Source: postiz/charts/postiz/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postiz-config + labels: + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + helm.sh/chart: postiz-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: postiz +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "2Gi" + storageClassName: "ceph-block" +--- +# 
Source: postiz/charts/postiz/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postiz-uploads + labels: + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + helm.sh/chart: postiz-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: postiz +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" + storageClassName: "ceph-block" +--- +# Source: postiz/charts/postiz/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: postiz + labels: + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + app.kubernetes.io/service: postiz + helm.sh/chart: postiz-4.4.0 + namespace: postiz +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 5000 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/name: postiz +--- +# Source: postiz/charts/cloudflared/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postiz-cloudflared-postiz + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-postiz + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-1.23.0 + namespace: postiz +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-postiz + app.kubernetes.io/instance: postiz + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/name: cloudflared-postiz + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: postiz-cloudflared-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: postiz/charts/postiz/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postiz + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: postiz + helm.sh/chart: postiz-4.4.0 + namespace: postiz +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: postiz + app.kubernetes.io/instance: postiz + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: postiz + app.kubernetes.io/name: postiz + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: MAIN_URL + value: https://postiz.alexlebens.dev + - name: FRONTEND_URL + value: https://postiz.alexlebens.dev + - name: NEXT_PUBLIC_BACKEND_URL + value: https://postiz.alexlebens.dev/api + - name: JWT_SECRET + valueFrom: + secretKeyRef: + key: JWT_SECRET + name: postiz-config-secret + - name: 
DATABASE_URL + valueFrom: + secretKeyRef: + key: uri + name: postiz-postgresql-17-cluster-app + - name: REDIS_URL + valueFrom: + secretKeyRef: + key: REDIS_URL + name: postiz-redis-config + - name: BACKEND_INTERNAL_URL + value: http://localhost:3000 + - name: IS_GENERAL + value: "true" + - name: STORAGE_PROVIDER + value: local + - name: UPLOAD_DIRECTORY + value: /uploads + - name: NEXT_PUBLIC_UPLOAD_DIRECTORY + value: /uploads + - name: NEXT_PUBLIC_POSTIZ_OAUTH_DISPLAY_NAME + value: Authentik + - name: NEXT_PUBLIC_POSTIZ_OAUTH_LOGO_URL + value: https://cdn.jsdelivr.net/gh/selfhst/icons/png/authentik.png + - name: POSTIZ_GENERIC_OAUTH + value: "true" + - name: POSTIZ_OAUTH_URL + value: https://auth.alexlebens.dev + - name: POSTIZ_OAUTH_AUTH_URL + value: https://auth.alexlebens.dev/application/o/authorize/ + - name: POSTIZ_OAUTH_TOKEN_URL + value: https://auth.alexlebens.dev/application/o/token/ + - name: POSTIZ_OAUTH_USERINFO_URL + value: https://auth.alexlebens.dev/application/o/userinfo/ + - name: POSTIZ_OAUTH_CLIENT_ID + valueFrom: + secretKeyRef: + key: client + name: postiz-oidc-secret + - name: POSTIZ_OAUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: secret + name: postiz-oidc-secret + - name: POSTIZ_OAUTH_SCOPE + value: openid profile email + image: ghcr.io/gitroomhq/postiz-app:v2.8.3 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /config + name: config + - mountPath: /uploads + name: uploads + volumes: + - name: config + persistentVolumeClaim: + claimName: postiz-config + - name: uploads + persistentVolumeClaim: + claimName: postiz-uploads +--- +# Source: postiz/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postiz-postgresql-17-cluster + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "postiz-postgresql-17-external-backup" + serverName: "postiz-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "postiz-postgresql-17-garage-local-backup" + serverName: "postiz-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "postiz-postgresql-17-recovery" + serverName: postiz-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: 
postiz-postgresql-17-backup-1 + + externalClusters: + - name: postiz-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "postiz-postgresql-17-recovery" + serverName: postiz-postgresql-17-backup-1 +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-config-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-config-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: JWT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/config + metadataPolicy: None + property: JWT_SECRET +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-redis-config + namespace: postiz + labels: + app.kubernetes.io/name: postiz-redis-config + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: REDIS_URL + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/redis + metadataPolicy: None + property: REDIS_URL + - secretKey: user + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/redis + metadataPolicy: None + property: user + - secretKey: password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/postiz/redis + metadataPolicy: None + property: password +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-oidc-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-oidc-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: client + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/postiz + metadataPolicy: None + property: client + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/postiz + metadataPolicy: None + property: secret +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-config-backup-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-config-backup-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/postiz/postiz-config" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + 
conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-uploads-backup-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-uploads-backup-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/postiz/postiz-uploads" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-cloudflared-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-cloudflared-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: cf-tunnel-token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cloudflare/tunnels/postiz + metadataPolicy: None + property: token +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-postgresql-17-cluster-backup-secret + namespace: postiz + labels: + app.kubernetes.io/name: postiz-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: postiz/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: postiz-postgresql-17-cluster-backup-secret-garage + namespace: postiz + labels: + app.kubernetes.io/name: postiz-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: postiz + 
app.kubernetes.io/part-of: postiz +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: postiz/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-postiz + namespace: postiz + labels: + app.kubernetes.io/name: http-route-postiz + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - postiz.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: postiz + port: 80 + weight: 100 +--- +# Source: postiz/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "postiz-postgresql-17-external-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/postiz/postiz-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: postiz-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: postiz-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: postiz/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "postiz-postgresql-17-garage-local-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/postiz/postiz-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: postiz/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "postiz-postgresql-17-recovery" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + 
configuration: + destinationPath: s3://postgres-backups/cl01tl/postiz/postiz-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: postiz-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: postiz/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: postiz-postgresql-17-alert-rules + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/postiz-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes. + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }}. + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer + instances. The replaced instance may need some time to catch up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="postiz"} - cnpg_pg_replication_is_wal_receiver_up{namespace="postiz"}) < 1 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster has fewer than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch up with the cluster primary instance.
+ + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="postiz"} - cnpg_pg_replication_is_wal_receiver_up{namespace="postiz"}) < 2 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="postiz",pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
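+          # The expr below counts cluster pods per node; a node hosting more than one suggests the pod anti-affinity declared in the Cluster spec above could not be honored at scheduling time.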
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
+          expr: |
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
+            max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            /
+            sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="postiz", persistentvolumeclaim=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            *
+            on(namespace, persistentvolumeclaim) group_left(volume)
+            kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}
+            ) > 0.7
+          for: 5m
+          labels:
+            severity: warning
+            namespace: postiz
+            cnpg_cluster: postiz-postgresql-17-cluster
+        - alert: CNPGClusterOffline
+          annotations:
+            summary: CNPG Cluster has no running instances!
+            description: |-
+              CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" has no ready instances.
+
+              Having an offline cluster means your applications will not be able to access the database, leading to
+              potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="postiz",pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: postiz
+            cnpg_cluster: postiz-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions since the frozen XID.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: postiz
+            cnpg_cluster: postiz-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: postiz
+            cnpg_cluster: postiz-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: postiz
+            cnpg_cluster: postiz-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same zone.
+            description: |-
+              CloudNativePG Cluster "postiz/postiz-postgresql-17-cluster" has instances in the same availability zone.
+ + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="postiz", pod=~"postiz-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: postiz + cnpg_cluster: postiz-postgresql-17-cluster +--- +# Source: postiz/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-postiz + namespace: postiz + labels: + app.kubernetes.io/name: redis-replication-postiz + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + redisSecret: + name: postiz-redis-config + key: password + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: postiz/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: postiz-config-backup-source + namespace: postiz + labels: + app.kubernetes.io/name: postiz-config-backup-source + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + sourcePVC: postiz-config + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: postiz-config-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: postiz/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: postiz-uploads-backup-source + namespace: postiz + labels: + app.kubernetes.io/name: postiz-uploads-backup-source + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz +spec: + sourcePVC: postiz-uploads + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: postiz-uploads-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: postiz/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "postiz-postgresql-17-daily-backup-scheduled-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: postiz-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "postiz-postgresql-17-external-backup" +--- +# Source: 
postiz/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "postiz-postgresql-17-live-backup-scheduled-backup" + namespace: postiz + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: postiz-postgresql-17 + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: postiz-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "postiz-postgresql-17-garage-local-backup" +--- +# Source: postiz/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-postiz + namespace: postiz + labels: + app.kubernetes.io/name: redis-replication-postiz + app.kubernetes.io/instance: postiz + app.kubernetes.io/part-of: postiz + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/roundcube/roundcube.yaml b/clusters/cl01tl/manifests/roundcube/roundcube.yaml new file mode 100644 index 000000000..ae0289b05 --- /dev/null +++ b/clusters/cl01tl/manifests/roundcube/roundcube.yaml @@ -0,0 +1,1005 @@ +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: roundcube + labels: + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + helm.sh/chart: roundcube-4.4.0 + namespace: roundcube +data: + default.conf: | + server { + listen 80 default_server; + server_name _; + root /var/www/html; + + location / { + try_files $uri /index.php$is_args$args; + } + + location ~ \.php(/|$) { + try_files $uri =404; + fastcgi_pass roundcube:9000; + fastcgi_read_timeout 300; + proxy_read_timeout 300; + fastcgi_split_path_info ^(.+\.php)(/.*)$; + include fastcgi_params; + fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name; + fastcgi_param DOCUMENT_ROOT $realpath_root; + internal; + } + + client_max_body_size 6m; + + error_log /var/log/nginx/error.log; + access_log /var/log/nginx/access.log; + } +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: roundcube-data + labels: + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + helm.sh/chart: roundcube-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: roundcube +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: roundcube/charts/roundcube/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: roundcube + labels: + app.kubernetes.io/instance: roundcube + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: roundcube + app.kubernetes.io/service: roundcube + helm.sh/chart: roundcube-4.4.0 + namespace: roundcube +spec: + type: ClusterIP + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + name: mail + - port: 80 + targetPort: 80 + protocol: TCP + name: web + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: roundcube + app.kubernetes.io/name: roundcube 
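+# The Deployment below pairs the roundcubemail PHP-FPM container with an nginx
+# sidecar: both mount the roundcube-data volume at /var/www/html, and nginx
+# forwards .php requests to FPM on port 9000 (see default.conf above).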
+---
+# Source: roundcube/charts/roundcube/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: roundcube-main
+  labels:
+    app.kubernetes.io/controller: main
+    app.kubernetes.io/instance: roundcube
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: roundcube
+    helm.sh/chart: roundcube-4.4.0
+  namespace: roundcube
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: main
+      app.kubernetes.io/name: roundcube
+      app.kubernetes.io/instance: roundcube
+  template:
+    metadata:
+      annotations:
+        checksum/configMaps: fb5b79e14a16673def67423a38952ae1855171d07a8332d9e863febcd28fce92
+      labels:
+        app.kubernetes.io/controller: main
+        app.kubernetes.io/instance: roundcube
+        app.kubernetes.io/name: roundcube
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - env:
+            - name: ROUNDCUBEMAIL_DB_TYPE
+              value: pgsql
+            - name: ROUNDCUBEMAIL_DB_HOST
+              valueFrom:
+                secretKeyRef:
+                  key: host
+                  name: roundcube-postgresql-17-cluster-app
+            - name: ROUNDCUBEMAIL_DB_NAME
+              valueFrom:
+                secretKeyRef:
+                  key: dbname
+                  name: roundcube-postgresql-17-cluster-app
+            - name: ROUNDCUBEMAIL_DB_USER
+              valueFrom:
+                secretKeyRef:
+                  key: user
+                  name: roundcube-postgresql-17-cluster-app
+            - name: ROUNDCUBEMAIL_DB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  key: password
+                  name: roundcube-postgresql-17-cluster-app
+            - name: ROUNDCUBEMAIL_DES_KEY
+              valueFrom:
+                secretKeyRef:
+                  key: DES_KEY
+                  name: roundcube-key-secret
+            - name: ROUNDCUBEMAIL_DEFAULT_HOST
+              value: stalwart.stalwart
+            - name: ROUNDCUBEMAIL_DEFAULT_PORT
+              value: "143"
+            - name: ROUNDCUBEMAIL_SMTP_SERVER
+              value: stalwart.stalwart
+            - name: ROUNDCUBEMAIL_SMTP_PORT
+              value: "25"
+            - name: ROUNDCUBEMAIL_SKIN
+              value: elastic
+            - name: ROUNDCUBEMAIL_PLUGINS
+              value: archive,zipdownload,newmail_notifier
+          image: roundcube/roundcubemail:1.6.11-fpm-alpine
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 10m
+              memory: 256Mi
+          volumeMounts:
+            - mountPath: /var/www/html
+              name: data
+            - mountPath: /tmp/roundcube-temp
+              name: temp
+        - env:
+            - name: NGINX_HOST
+              value: mail.alexlebens.net
+            - name: NGINX_PHP_CGI
+              value: roundcube.roundcube:9000
+          image: nginx:1.29.3-alpine
+          imagePullPolicy: IfNotPresent
+          name: nginx
+          resources:
+            requests:
+              cpu: 10m
+              memory: 128Mi
+          volumeMounts:
+            - mountPath: /etc/nginx/conf.d/default.conf
+              mountPropagation: None
+              name: config
+              readOnly: true
+              subPath: default.conf
+            - mountPath: /var/www/html
+              name: data
+      volumes:
+        - configMap:
+            name: roundcube
+          name: config
+        - name: data
+          persistentVolumeClaim:
+            claimName: roundcube-data
+        - emptyDir: {}
+          name: temp
+---
+# Source: roundcube/charts/roundcube/templates/common.yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: roundcube-cleandb
+  labels:
+    app.kubernetes.io/controller: cleandb
+    app.kubernetes.io/instance: roundcube
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: roundcube
+    helm.sh/chart: roundcube-4.4.0
+  namespace: roundcube
+spec:
+  suspend: false
+  concurrencyPolicy: Forbid
+  startingDeadlineSeconds: 90
+  timeZone: US/Central
+  schedule: "30 4 * * *"
+  successfulJobsHistoryLimit: 3
+  failedJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      parallelism: 1
+      backoffLimit: 3
+      template:
+        metadata:
+          annotations:
+            checksum/configMaps: 
fb5b79e14a16673def67423a38952ae1855171d07a8332d9e863febcd28fce92 + labels: + app.kubernetes.io/controller: cleandb + app.kubernetes.io/instance: roundcube + app.kubernetes.io/name: roundcube + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + restartPolicy: Never + containers: + - args: + - bin/cleandb.sh + env: + - name: ROUNDCUBEMAIL_DB_TYPE + value: pgsql + - name: ROUNDCUBEMAIL_DB_HOST + valueFrom: + secretKeyRef: + key: host + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_NAME + valueFrom: + secretKeyRef: + key: dbname + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_USER + valueFrom: + secretKeyRef: + key: user + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: roundcube-postgresql-17-cluster-app + - name: ROUNDCUBEMAIL_DES_KEY + valueFrom: + secretKeyRef: + key: DES_KEY + name: roundcube-key-secret + - name: ROUNDCUBEMAIL_DEFAULT_HOST + value: tls://stalwart.stalwart + - name: ROUNDCUBEMAIL_SMTP_SERVER + value: tls://stalwart.stalwart + - name: ROUNDCUBEMAIL_SKIN + value: elastic + - name: ROUNDCUBEMAIL_PLUGINS + value: archive,zipdownload,newmail_notifier + image: roundcube/roundcubemail:1.6.11-fpm-alpine + imagePullPolicy: IfNotPresent + name: backup + resources: + requests: + cpu: 100m + memory: 128Mi +--- +# Source: roundcube/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: roundcube-postgresql-17-cluster + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "roundcube-postgresql-17-external-backup" + serverName: "roundcube-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "roundcube-postgresql-17-garage-local-backup" + serverName: "roundcube-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "roundcube-postgresql-17-recovery" + serverName: roundcube-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: roundcube-postgresql-17-backup-1 + + externalClusters: + - name: roundcube-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + 
enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "roundcube-postgresql-17-recovery" + serverName: roundcube-postgresql-17-backup-1 +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-key-secret + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-key-secret + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: DES_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/roundcube/key + metadataPolicy: None + property: DES_KEY +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-data-backup-secret + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-data-backup-secret + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/roundcube/roundcube-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-postgresql-17-cluster-backup-secret + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: roundcube/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: roundcube-postgresql-17-cluster-backup-secret-garage + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: 
Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: roundcube/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-mail + namespace: roundcube + labels: + app.kubernetes.io/name: http-route-mail + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - mail.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: roundcube + port: 80 + weight: 100 +--- +# Source: roundcube/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "roundcube-postgresql-17-external-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/roundcube/roundcube-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: roundcube-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: roundcube-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: roundcube/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "roundcube-postgresql-17-garage-local-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/roundcube/roundcube-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: roundcube-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: roundcube/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "roundcube-postgresql-17-recovery" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: 
s3://postgres-backups/cl01tl/roundcube/roundcube-postgresql-17-cluster
+    endpointURL: http://garage-main.garage:3900
+    wal:
+      compression: snappy
+      maxParallel: 1
+    data:
+      compression: snappy
+      jobs: 1
+    s3Credentials:
+      accessKeyId:
+        name: roundcube-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: roundcube-postgresql-17-cluster-backup-secret-garage
+        key: ACCESS_SECRET_KEY
+---
+# Source: roundcube/charts/postgres-17-cluster/templates/prometheus-rule.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: roundcube-postgresql-17-alert-rules
+  namespace: roundcube
+  labels:
+    helm.sh/chart: postgres-17-cluster-6.16.0
+    app.kubernetes.io/name: roundcube-postgresql-17
+    app.kubernetes.io/instance: roundcube
+    app.kubernetes.io/part-of: roundcube
+    app.kubernetes.io/version: "6.16.0"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  groups:
+    - name: cloudnative-pg/roundcube-postgresql-17
+      rules:
+        - alert: CNPGClusterBackendsWaitingWarning
+          annotations:
+            summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
+            description: |-
+              Pod {{ $labels.pod }}
+              has been waiting for longer than 5 minutes
+          expr: |
+            cnpg_backends_waiting_total > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterDatabaseDeadlockConflictsWarning
+          annotations:
+            summary: CNPG Cluster has over 10 deadlock conflicts.
+            description: |-
+              There are over 10 deadlock conflicts in
+              {{ $labels.pod }}
+          expr: |
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterHACritical
+          annotations:
+            summary: CNPG Cluster has no standby replicas!
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
+              risk of data loss and downtime if the primary instance fails.
+
+              The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
+              will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
+
+              This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
+              instances. The replaced instance may need some time to catch up with the cluster primary instance.
+
+              This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
+              case you may want to silence it.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
+          expr: |
+            max by (job) (cnpg_pg_replication_streaming_replicas{namespace="roundcube"} - cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}) < 1
+          for: 5m
+          labels:
+            severity: critical
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterHAWarning
+          annotations:
+            summary: CNPG Cluster has fewer than 2 standby replicas.
+            description: |-
+              CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
+              your cluster at risk if another instance fails. The cluster is still able to operate normally, although
+              the `-ro` and `-r` endpoints operate at reduced capacity.
+
+              This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
+              need some time to catch up with the cluster primary instance.
+ + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="roundcube"} - cnpg_pg_replication_is_wal_receiver_up{namespace="roundcube"}) < 2 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="roundcube",pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
+          expr: |
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
+            max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
+            max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            /
+            sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="roundcube", persistentvolumeclaim=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
+            *
+            on(namespace, persistentvolumeclaim) group_left(volume)
+            kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}
+            ) > 0.7
+          for: 5m
+          labels:
+            severity: warning
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterOffline
+          annotations:
+            summary: CNPG Cluster has no running instances!
+            description: |-
+              CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" has no ready instances.
+
+              Having an offline cluster means your applications will not be able to access the database, leading to
+              potential service disruption and/or data loss.
+            runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+          expr: |
+            (count(cnpg_collector_up{namespace="roundcube",pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+          for: 5m
+          labels:
+            severity: critical
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterPGDatabaseXidAgeWarning
+          annotations:
+            summary: CNPG Cluster has a high number of transactions since the frozen XID.
+            description: |-
+              Over 300,000,000 transactions from frozen xid
+              on pod {{ $labels.pod }}
+          expr: |
+            cnpg_pg_database_xid_age > 300000000
+          for: 1m
+          labels:
+            severity: warning
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterPGReplicationWarning
+          annotations:
+            summary: CNPG Cluster standby is lagging behind the primary.
+            description: |-
+              Standby is lagging behind by over 300 seconds (5 minutes)
+          expr: |
+            cnpg_pg_replication_lag > 300
+          for: 1m
+          labels:
+            severity: warning
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterReplicaFailingReplicationWarning
+          annotations:
+            summary: CNPG Cluster has a replica that is failing to replicate.
+            description: |-
+              Replica {{ $labels.pod }}
+              is failing to replicate
+          expr: |
+            cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+          for: 1m
+          labels:
+            severity: warning
+            namespace: roundcube
+            cnpg_cluster: roundcube-postgresql-17-cluster
+        - alert: CNPGClusterZoneSpreadWarning
+          annotations:
+            summary: CNPG Cluster has instances in the same zone.
+ description: |- + CloudNativePG Cluster "roundcube/roundcube-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="roundcube", pod=~"roundcube-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: roundcube + cnpg_cluster: roundcube-postgresql-17-cluster +--- +# Source: roundcube/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: roundcube-data-backup-source + namespace: roundcube + labels: + app.kubernetes.io/name: roundcube-data-backup-source + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube +spec: + sourcePVC: roundcube-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: roundcube-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: roundcube/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "roundcube-postgresql-17-daily-backup-scheduled-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: roundcube-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "roundcube-postgresql-17-external-backup" +--- +# Source: roundcube/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "roundcube-postgresql-17-live-backup-scheduled-backup" + namespace: roundcube + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: roundcube-postgresql-17 + app.kubernetes.io/instance: roundcube + app.kubernetes.io/part-of: roundcube + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: roundcube-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "roundcube-postgresql-17-garage-local-backup" diff --git a/clusters/cl01tl/manifests/searxng/searxng.yaml b/clusters/cl01tl/manifests/searxng/searxng.yaml new file mode 100644 index 000000000..3da7271ac --- /dev/null +++ b/clusters/cl01tl/manifests/searxng/searxng.yaml @@ -0,0 +1,435 @@ +--- +# Source: searxng/charts/searxng/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: searxng-browser-data + labels: + app.kubernetes.io/instance: searxng + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: searxng + helm.sh/chart: searxng-4.4.0 + namespace: searxng 
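+# searxng is rendered as two Deployments below, a public-facing browser UI and
+# a cluster-internal API endpoint, each mounting its own /etc/searxng PVC.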
+spec:
+  accessModes:
+    - "ReadWriteOnce"
+  resources:
+    requests:
+      storage: "5Gi"
+  storageClassName: "ceph-block"
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: searxng-api-data
+  labels:
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  accessModes:
+    - "ReadWriteOnce"
+  resources:
+    requests:
+      storage: "5Gi"
+  storageClassName: "ceph-block"
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: searxng-api
+  labels:
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    app.kubernetes.io/service: searxng-api
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  type: ClusterIP
+  ports:
+    - port: 8080
+      targetPort: 8080
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: api
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/name: searxng
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: searxng-browser
+  labels:
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    app.kubernetes.io/service: searxng-browser
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  type: ClusterIP
+  ports:
+    - port: 80
+      targetPort: 8080
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: browser
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/name: searxng
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: searxng-browser
+  labels:
+    app.kubernetes.io/controller: browser
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: searxng
+    helm.sh/chart: searxng-4.4.0
+  namespace: searxng
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: browser
+      app.kubernetes.io/name: searxng
+      app.kubernetes.io/instance: searxng
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: browser
+        app.kubernetes.io/instance: searxng
+        app.kubernetes.io/name: searxng
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - env:
+            - name: SEARXNG_BASE_URL
+              value: https://searxng.alexlebens.net/
+            - name: SEARXNG_QUERY_URL
+              value: https://searxng.alexlebens.net/search?q=
+            - name: SEARXNG_HOSTNAME
+              value: searxng.alexlebens.net
+            - name: SEARXNG_REDIS_URL
+              value: redis://redis-replication-searxng-master.searxng:6379/0
+            - name: UWSGI_WORKERS
+              value: "4"
+            - name: UWSGI_THREADS
+              value: "4"
+          image: searxng/searxng:latest@sha256:faa7118f9167c2c1e09a3fbb9bd87eee0905d76456d297e62e815646afc97037
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 10m
+              memory: 256Mi
+          volumeMounts:
+            - mountPath: /etc/searxng
+              name: browser-data
+      volumes:
+        - name: browser-data
+          persistentVolumeClaim:
+            claimName: searxng-browser-data
+---
+# Source: searxng/charts/searxng/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: searxng-api
+  labels:
+    app.kubernetes.io/controller: api
+    app.kubernetes.io/instance: searxng
+    app.kubernetes.io/managed-by: 
Helm + app.kubernetes.io/name: searxng + helm.sh/chart: searxng-4.4.0 + namespace: searxng +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: api + app.kubernetes.io/name: searxng + app.kubernetes.io/instance: searxng + template: + metadata: + labels: + app.kubernetes.io/controller: api + app.kubernetes.io/instance: searxng + app.kubernetes.io/name: searxng + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: SEARXNG_BASE_URL + value: http://searxng-api.searxng:8080 + - name: SEARXNG_QUERY_URL + value: http://searxng-api.searxng:8080/search?q= + - name: SEARXNG_HOSTNAME + value: searxng-api.searxng + - name: UWSGI_WORKERS + value: "4" + - name: UWSGI_THREADS + value: "4" + - name: ENABLE_RAG_WEB_SEARCH + value: "true" + - name: RAG_WEB_SEARCH_ENGINE + value: searxng + - name: RAG_WEB_SEARCH_RESULT_COUNT + value: "3" + - name: RAG_WEB_SEARCH_CONCURRENT_REQUESTS + value: "10" + image: searxng/searxng:latest@sha256:faa7118f9167c2c1e09a3fbb9bd87eee0905d76456d297e62e815646afc97037 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 256Mi + volumeMounts: + - mountPath: /etc/searxng + name: api-data + - mountPath: /etc/searxng/settings.yml + mountPropagation: None + name: config + readOnly: true + subPath: settings.yml + - mountPath: /etc/searxng/limiter.toml + mountPropagation: None + name: config + readOnly: true + subPath: limiter.toml + volumes: + - name: api-data + persistentVolumeClaim: + claimName: searxng-api-data + - name: config + secret: + secretName: searxng-api-config-secret +--- +# Source: searxng/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: searxng-api-config-secret + namespace: searxng + labels: + app.kubernetes.io/name: searxng-api-config-secret + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: settings.yml + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/searxng/api/config + metadataPolicy: None + property: settings.yml + - secretKey: limiter.toml + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/searxng/api/config + metadataPolicy: None + property: limiter.toml +--- +# Source: searxng/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: searxng-browser-data-backup-secret + namespace: searxng + labels: + app.kubernetes.io/name: searxng-browser-data-backup-secret + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + mergePolicy: Merge + engineVersion: v2 + data: + RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/searxng/searxng-browser-data" + data: + - secretKey: BUCKET_ENDPOINT + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: S3_BUCKET_ENDPOINT + - secretKey: RESTIC_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: RESTIC_PASSWORD + - secretKey: AWS_DEFAULT_REGION + remoteRef: + conversionStrategy: Default + 
decodingStrategy: None + key: /cl01tl/volsync/restic/config + metadataPolicy: None + property: AWS_DEFAULT_REGION + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: access_key + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/volsync-backups + metadataPolicy: None + property: secret_key +--- +# Source: searxng/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-searxng + namespace: searxng + labels: + app.kubernetes.io/name: http-route-searxng + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - searxng.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: searxng-browser + port: 80 + weight: 100 +--- +# Source: searxng/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-searxng + namespace: searxng + labels: + app.kubernetes.io/name: redis-replication-searxng + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: searxng/templates/replication-source.yaml +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: searxng-browser-data-backup-source + namespace: searxng + labels: + app.kubernetes.io/name: searxng-browser-data-backup-source + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng +spec: + sourcePVC: searxng-browser-data + trigger: + schedule: 0 4 * * * + restic: + pruneIntervalDays: 7 + repository: searxng-browser-data-backup-secret + retain: + hourly: 1 + daily: 3 + weekly: 2 + monthly: 2 + yearly: 4 + copyMethod: Snapshot + storageClassName: ceph-block + volumeSnapshotClassName: ceph-blockpool-snapshot +--- +# Source: searxng/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-searxng + namespace: searxng + labels: + app.kubernetes.io/name: redis-replication-searxng + app.kubernetes.io/instance: searxng + app.kubernetes.io/part-of: searxng + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/site-documentation/site-documentation.yaml b/clusters/cl01tl/manifests/site-documentation/site-documentation.yaml new file mode 100644 index 000000000..96df16ff6 --- /dev/null +++ b/clusters/cl01tl/manifests/site-documentation/site-documentation.yaml @@ -0,0 +1,153 @@ +--- +# Source: site-documentation/charts/site-documentation/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: site-documentation + labels: + 
app.kubernetes.io/instance: site-documentation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: site-documentation + app.kubernetes.io/service: site-documentation + helm.sh/chart: site-documentation-4.4.0 + namespace: site-documentation +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 4321 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/name: site-documentation +--- +# Source: site-documentation/charts/cloudflared-site/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site-documentation-cloudflared-site + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cloudflared-site + app.kubernetes.io/version: 2025.10.0 + helm.sh/chart: cloudflared-site-1.23.0 + namespace: site-documentation +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: cloudflared-site + app.kubernetes.io/instance: site-documentation + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/name: cloudflared-site + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - args: + - tunnel + - --protocol + - http2 + - --no-autoupdate + - run + - --token + - $(CF_MANAGED_TUNNEL_TOKEN) + env: + - name: CF_MANAGED_TUNNEL_TOKEN + valueFrom: + secretKeyRef: + key: cf-tunnel-token + name: site-documentation-cloudflared-api-secret + image: cloudflare/cloudflared:2025.11.1 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: site-documentation/charts/site-documentation/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: site-documentation + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: site-documentation + helm.sh/chart: site-documentation-4.4.0 + namespace: site-documentation +spec: + revisionHistoryLimit: 3 + replicas: 3 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: site-documentation + app.kubernetes.io/instance: site-documentation + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/name: site-documentation + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - image: harbor.alexlebens.net/images/site-documentation:0.0.3 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: site-documentation/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: site-documentation-cloudflared-api-secret + namespace: site-documentation + labels: + app.kubernetes.io/name: site-documentation-cloudflared-api-secret + app.kubernetes.io/instance: site-documentation + app.kubernetes.io/part-of: site-documentation +spec: + secretStoreRef: + kind: 
+---
+# Source: site-documentation/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: site-documentation-cloudflared-api-secret
+  namespace: site-documentation
+  labels:
+    app.kubernetes.io/name: site-documentation-cloudflared-api-secret
+    app.kubernetes.io/instance: site-documentation
+    app.kubernetes.io/part-of: site-documentation
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  data:
+    - secretKey: cf-tunnel-token
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cloudflare/tunnels/site-documentation
+        metadataPolicy: None
+        property: token
diff --git a/clusters/cl01tl/manifests/site-profile/site-profile.yaml b/clusters/cl01tl/manifests/site-profile/site-profile.yaml
new file mode 100644
index 000000000..f16ff4c21
--- /dev/null
+++ b/clusters/cl01tl/manifests/site-profile/site-profile.yaml
@@ -0,0 +1,153 @@
+---
+# Source: site-profile/charts/site-profile/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: site-profile
+  labels:
+    app.kubernetes.io/instance: site-profile
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: site-profile
+    app.kubernetes.io/service: site-profile
+    helm.sh/chart: site-profile-4.4.0
+  namespace: site-profile
+spec:
+  type: ClusterIP
+  ports:
+    - port: 80
+      targetPort: 4321
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: main
+    app.kubernetes.io/instance: site-profile
+    app.kubernetes.io/name: site-profile
+---
+# Source: site-profile/charts/cloudflared-site/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: site-profile-cloudflared-site
+  labels:
+    app.kubernetes.io/controller: main
+    app.kubernetes.io/instance: site-profile
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: cloudflared-site
+    app.kubernetes.io/version: 2025.10.0
+    helm.sh/chart: cloudflared-site-1.23.0
+  namespace: site-profile
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: main
+      app.kubernetes.io/name: cloudflared-site
+      app.kubernetes.io/instance: site-profile
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: main
+        app.kubernetes.io/instance: site-profile
+        app.kubernetes.io/name: cloudflared-site
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - args:
+            - tunnel
+            - --protocol
+            - http2
+            - --no-autoupdate
+            - run
+            - --token
+            - $(CF_MANAGED_TUNNEL_TOKEN)
+          env:
+            - name: CF_MANAGED_TUNNEL_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  key: cf-tunnel-token
+                  name: site-profile-cloudflared-api-secret
+          image: cloudflare/cloudflared:2025.11.1
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 10m
+              memory: 128Mi
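+# Note: the single-replica cloudflared Deployment above uses a Recreate
+# strategy, presumably so two tunnel pods never run against the same token
+# during a rollout, while the stateless site Deployment below can run three
+# replicas under a RollingUpdate.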
+---
+# Source: site-profile/charts/site-profile/templates/common.yaml
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: site-profile
+  labels:
+    app.kubernetes.io/controller: main
+    app.kubernetes.io/instance: site-profile
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: site-profile
+    helm.sh/chart: site-profile-4.4.0
+  namespace: site-profile
+spec:
+  revisionHistoryLimit: 3
+  replicas: 3
+  strategy:
+    type: RollingUpdate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: main
+      app.kubernetes.io/name: site-profile
+      app.kubernetes.io/instance: site-profile
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: main
+        app.kubernetes.io/instance: site-profile
+        app.kubernetes.io/name: site-profile
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - image: harbor.alexlebens.net/images/site-profile:2.1.0
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 10m
+              memory: 128Mi
+---
+# Source: site-profile/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: site-profile-cloudflared-api-secret
+  namespace: site-profile
+  labels:
+    app.kubernetes.io/name: site-profile-cloudflared-api-secret
+    app.kubernetes.io/instance: site-profile
+    app.kubernetes.io/part-of: site-profile
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  data:
+    - secretKey: cf-tunnel-token
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cloudflare/tunnels/site-profile
+        metadataPolicy: None
+        property: token
diff --git a/clusters/cl01tl/manifests/slskd/slskd.yaml b/clusters/cl01tl/manifests/slskd/slskd.yaml
new file mode 100644
index 000000000..dc896bd7d
--- /dev/null
+++ b/clusters/cl01tl/manifests/slskd/slskd.yaml
@@ -0,0 +1,396 @@
+---
+# Source: slskd/templates/namespace.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: slskd
+  labels:
+    app.kubernetes.io/name: slskd
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+    pod-security.kubernetes.io/audit: privileged
+    pod-security.kubernetes.io/enforce: privileged
+    pod-security.kubernetes.io/warn: privileged
+---
+# Source: slskd/templates/persistent-volume.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: slskd-nfs-storage
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: slskd-nfs-storage
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs-client
+  capacity:
+    storage: 1Gi
+  accessModes:
+    - ReadWriteMany
+  nfs:
+    path: /volume2/Storage
+    server: synologybond.alexlebens.net
+  mountOptions:
+    - vers=4
+    - minorversion=1
+    - noac
+---
+# Source: slskd/templates/persistent-volume-claim.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: slskd-nfs-storage
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: slskd-nfs-storage
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  volumeName: slskd-nfs-storage
+  storageClassName: nfs-client
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+---
+# Source: slskd/charts/slskd/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: slskd
+  labels:
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: slskd
+    app.kubernetes.io/service: slskd
+    helm.sh/chart: slskd-4.4.0
+  namespace: slskd
+spec:
+  type: ClusterIP
+  ports:
+    - port: 5030
+      targetPort: 5030
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: main
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/name: slskd
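+# Note: the Deployment below runs slskd behind a gluetun WireGuard sidecar.
+# This is why the namespace above is labeled for the privileged Pod Security
+# level: gluetun needs NET_ADMIN/SYS_MODULE and a devic.es/tun device, and the
+# busybox init container enables IP forwarding and disables IPv6 via sysctl
+# before the VPN comes up. VPN_PORT_FORWARDING requests a forwarded port from
+# ProtonVPN, while FIREWALL_INPUT_PORTS keeps 5030 (web UI) and 50300 open.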
+---
+# Source: slskd/charts/slskd/templates/common.yaml
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: slskd-main
+  labels:
+    app.kubernetes.io/controller: main
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: slskd
+    helm.sh/chart: slskd-4.4.0
+  namespace: slskd
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: main
+      app.kubernetes.io/name: slskd
+      app.kubernetes.io/instance: slskd
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: main
+        app.kubernetes.io/instance: slskd
+        app.kubernetes.io/name: slskd
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      initContainers:
+        - args:
+            - -ec
+            - |
+              sysctl -w net.ipv4.ip_forward=1;
+              sysctl -w net.ipv6.conf.all.disable_ipv6=1
+          command:
+            - /bin/sh
+          image: busybox:1.37.0
+          imagePullPolicy: IfNotPresent
+          name: init-sysctl
+          resources:
+            requests:
+              cpu: 10m
+              memory: 128Mi
+          securityContext:
+            privileged: true
+      containers:
+        - env:
+            - name: VPN_SERVICE_PROVIDER
+              value: protonvpn
+            - name: VPN_TYPE
+              value: wireguard
+            - name: WIREGUARD_PRIVATE_KEY
+              valueFrom:
+                secretKeyRef:
+                  key: private-key
+                  name: slskd-wireguard-conf
+            - name: VPN_PORT_FORWARDING
+              value: "on"
+            - name: PORT_FORWARD_ONLY
+              value: "on"
+            - name: FIREWALL_OUTBOUND_SUBNETS
+              value: 192.168.1.0/24,10.244.0.0/16
+            - name: FIREWALL_INPUT_PORTS
+              value: 5030,50300
+            - name: DOT
+              value: "off"
+          image: ghcr.io/qdm12/gluetun:v3.40.3@sha256:ef4a44819a60469682c7b5e69183e6401171891feaa60186652d292c59e41b30
+          imagePullPolicy: IfNotPresent
+          name: gluetun
+          resources:
+            limits:
+              devic.es/tun: "1"
+            requests:
+              cpu: 10m
+              devic.es/tun: "1"
+              memory: 128Mi
+          securityContext:
+            capabilities:
+              add:
+                - NET_ADMIN
+                - SYS_MODULE
+            privileged: true
+        - env:
+            - name: TZ
+              value: US/Central
+            - name: PUID
+              value: "1000"
+            - name: PGID
+              value: "1000"
+            - name: SLSKD_UMASK
+              value: "0"
+          image: slskd/slskd:0.24.0
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 100m
+              memory: 512Mi
+          volumeMounts:
+            - mountPath: /mnt/store
+              name: data
+            - mountPath: /app/slskd.yml
+              mountPropagation: None
+              name: slskd-config
+              readOnly: true
+              subPath: slskd.yml
+      volumes:
+        - name: data
+          persistentVolumeClaim:
+            claimName: slskd-nfs-storage
+        - name: slskd-config
+          secret:
+            secretName: slskd-config-secret
+---
+# Source: slskd/charts/slskd/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: slskd-soularr
+  labels:
+    app.kubernetes.io/controller: soularr
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: slskd
+    helm.sh/chart: slskd-4.4.0
+  namespace: slskd
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: soularr
+      app.kubernetes.io/name: slskd
+      app.kubernetes.io/instance: slskd
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: soularr
+        app.kubernetes.io/instance: slskd
+        app.kubernetes.io/name: slskd
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      securityContext:
+        fsGroup: 1000
+        fsGroupChangePolicy: OnRootMismatch
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - env:
+            - name: TZ
+              value: US/Central
+            - name: PUID
+              value: "1000"
+            - name: PGID
+              value: "1000"
+            - name: SCRIPT_INTERVAL
+              value: "300"
+          image: mrusse08/soularr:latest@sha256:71a0b9e5a522d76bb0ffdb6d720d681fde22417b3a5acc9ecae61c89d05d8afc
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 10m
+              memory: 256Mi
+          volumeMounts:
+            - mountPath: /mnt/store
+              name: data
+            - mountPath: /data/config.ini
+              mountPropagation: None
+              name: soularr-config
+              readOnly: true
+              subPath: config.ini
+      volumes:
+        - name: data
+          persistentVolumeClaim:
+            claimName: slskd-nfs-storage
+        - name: soularr-config
+          secret:
+            secretName: soularr-config-secret
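+# Note: both pods above mount their configuration files (slskd.yml and
+# soularr's config.ini) from Secrets via subPath; the ExternalSecrets below
+# pull those files whole out of vault rather than templating them in this repo.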
+---
+# Source: slskd/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: slskd-config-secret
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: slskd-config-secret
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  data:
+    - secretKey: slskd.yml
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/slskd/config
+        metadataPolicy: None
+        property: slskd.yml
+---
+# Source: slskd/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: soularr-config-secret
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: soularr-config-secret
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  data:
+    - secretKey: config.ini
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/slskd/soularr
+        metadataPolicy: None
+        property: config.ini
+---
+# Source: slskd/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: slskd-wireguard-conf
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: slskd-wireguard-conf
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  data:
+    - secretKey: private-key
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /protonvpn/conf/cl01tl
+        metadataPolicy: None
+        property: private-key
+---
+# Source: slskd/templates/http-route.yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: http-route-slskd
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: http-route-slskd
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  parentRefs:
+    - group: gateway.networking.k8s.io
+      kind: Gateway
+      name: traefik-gateway
+      namespace: traefik
+  hostnames:
+    - slskd.alexlebens.net
+  rules:
+    - matches:
+        - path:
+            type: PathPrefix
+            value: /
+      backendRefs:
+        - group: ''
+          kind: Service
+          name: slskd
+          port: 5030
+          weight: 100
+---
+# Source: slskd/templates/service-monitor.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: slskd
+  namespace: slskd
+  labels:
+    app.kubernetes.io/name: slskd
+    app.kubernetes.io/instance: slskd
+    app.kubernetes.io/part-of: slskd
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: slskd
+      app.kubernetes.io/instance: slskd
+  endpoints:
+    - port: http
+      interval: 3m
+      scrapeTimeout: 1m
+      path: /metrics
diff --git a/clusters/cl01tl/manifests/tdarr/tdarr.yaml b/clusters/cl01tl/manifests/tdarr/tdarr.yaml
new file mode 100644
index 000000000..4da17a419
--- /dev/null
+++ b/clusters/cl01tl/manifests/tdarr/tdarr.yaml
@@ -0,0 +1,658 @@
+---
+# Source: tdarr/templates/persistent-volume.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: tdarr-nfs-storage
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: tdarr-nfs-storage
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs-client
+  capacity:
+    storage: 1Gi
+  accessModes:
+    - ReadWriteMany
+  nfs:
+    path: /volume2/Storage
+    server: synologybond.alexlebens.net
+  mountOptions:
+    - vers=4
+    - minorversion=1
+    - noac
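+# Note: the two chart PVCs below are annotated helm.sh/resource-policy: keep,
+# so Helm leaves the tdarr config and server state behind on uninstall; the
+# NFS-backed claim further down re-binds to the statically defined
+# PersistentVolume above.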
+---
+# Source: tdarr/charts/tdarr/templates/common.yaml
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: tdarr-config
+  labels:
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: tdarr
+    helm.sh/chart: tdarr-4.4.0
+  annotations:
+    helm.sh/resource-policy: keep
+  namespace: tdarr
+spec:
+  accessModes:
+    - "ReadWriteOnce"
+  resources:
+    requests:
+      storage: "50Gi"
+  storageClassName: "ceph-block"
+---
+# Source: tdarr/charts/tdarr/templates/common.yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: tdarr-server
+  labels:
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: tdarr
+    helm.sh/chart: tdarr-4.4.0
+  annotations:
+    helm.sh/resource-policy: keep
+  namespace: tdarr
+spec:
+  accessModes:
+    - "ReadWriteOnce"
+  resources:
+    requests:
+      storage: "50Gi"
+  storageClassName: "ceph-block"
+---
+# Source: tdarr/templates/persistent-volume-claim.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: tdarr-nfs-storage
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: tdarr-nfs-storage
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  volumeName: tdarr-nfs-storage
+  storageClassName: nfs-client
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+---
+# Source: tdarr/charts/tdarr-exporter/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: tdarr-tdarr-exporter
+  labels:
+    helm.sh/chart: tdarr-exporter-1.1.7
+    app.kubernetes.io/name: tdarr-exporter
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/version: "1.4.3"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  type: ClusterIP
+  ports:
+    - port: 9090
+      targetPort: 9090
+      protocol: TCP
+      name: metrics
+  selector:
+    app.kubernetes.io/name: tdarr-exporter
+    app.kubernetes.io/instance: tdarr
+---
+# Source: tdarr/charts/tdarr/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: tdarr-api
+  labels:
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: tdarr
+    app.kubernetes.io/service: tdarr-api
+    helm.sh/chart: tdarr-4.4.0
+  namespace: tdarr
+spec:
+  type: ClusterIP
+  ports:
+    - port: 8266
+      targetPort: 8266
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: server
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/name: tdarr
+---
+# Source: tdarr/charts/tdarr/templates/common.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: tdarr-web
+  labels:
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: tdarr
+    app.kubernetes.io/service: tdarr-web
+    helm.sh/chart: tdarr-4.4.0
+  namespace: tdarr
+spec:
+  type: ClusterIP
+  ports:
+    - port: 8265
+      targetPort: 8265
+      protocol: TCP
+      name: http
+  selector:
+    app.kubernetes.io/controller: server
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/name: tdarr
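+# Note: the DaemonSet below places one transcode node on every host carrying
+# the Intel GPU node-feature label, requests a gpu.intel.com/i915 device for
+# hardware transcoding, and registers with the server through the tdarr-api
+# Service above; /tcache is per-pod emptyDir scratch space.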
+---
+# Source: tdarr/charts/tdarr/templates/common.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: tdarr-node
+  labels:
+    app.kubernetes.io/controller: node
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: tdarr
+    helm.sh/chart: tdarr-4.4.0
+  namespace: tdarr
+spec:
+  revisionHistoryLimit: 3
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: node
+      app.kubernetes.io/name: tdarr
+      app.kubernetes.io/instance: tdarr
+  template:
+    metadata:
+      annotations:
+
+      labels:
+        app.kubernetes.io/controller: node
+        app.kubernetes.io/instance: tdarr
+        app.kubernetes.io/name: tdarr
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      nodeSelector:
+        intel.feature.node.kubernetes.io/gpu: "true"
+      containers:
+        - env:
+            - name: TZ
+              value: US/Central
+            - name: PUID
+              value: "1001"
+            - name: PGID
+              value: "1001"
+            - name: UMASK_SET
+              value: "002"
+            - name: ffmpegVersion
+              value: "6"
+            - name: inContainer
+              value: "true"
+            - name: nodeName
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: serverIP
+              value: tdarr-api
+            - name: serverPort
+              value: "8266"
+          image: ghcr.io/haveagitgat/tdarr_node:2.58.02
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            limits:
+              gpu.intel.com/i915: 1
+            requests:
+              cpu: 10m
+              gpu.intel.com/i915: 1
+              memory: 512Mi
+          volumeMounts:
+            - mountPath: /mnt/store
+              name: media
+              readOnly: true
+            - mountPath: /tcache
+              name: node-cache
+      volumes:
+        - name: media
+          persistentVolumeClaim:
+            claimName: tdarr-nfs-storage
+        - emptyDir: {}
+          name: node-cache
+---
+# Source: tdarr/charts/tdarr-exporter/templates/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tdarr-tdarr-exporter
+  labels:
+    helm.sh/chart: tdarr-exporter-1.1.7
+    app.kubernetes.io/name: tdarr-exporter
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/version: "1.4.3"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: tdarr-exporter
+      app.kubernetes.io/instance: tdarr
+  template:
+    metadata:
+      annotations:
+      labels:
+        helm.sh/chart: tdarr-exporter-1.1.7
+        app.kubernetes.io/name: tdarr-exporter
+        app.kubernetes.io/instance: tdarr
+        app.kubernetes.io/version: "1.4.3"
+        app.kubernetes.io/managed-by: Helm
+    spec:
+      serviceAccountName: default
+      securityContext:
+        {}
+      containers:
+        - name: tdarr-exporter
+          securityContext:
+            {}
+          image: "docker.io/homeylab/tdarr-exporter:1.4.2"
+          imagePullPolicy: IfNotPresent
+          ports:
+            - name: metrics
+              containerPort: 9090
+              protocol: TCP
+          env:
+            - name: TDARR_URL
+              value: "http://tdarr-web.tdarr:8265"
+            - name: VERIFY_SSL
+              value: "false"
+            - name: LOG_LEVEL
+              value: "info"
+            - name: PROMETHEUS_PORT
+              value: "9090"
+            - name: PROMETHEUS_PATH
+              value: "/metrics"
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: metrics
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 3
+          readinessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: metrics
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 2
+          startupProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: metrics
+            initialDelaySeconds: 2
+            periodSeconds: 5
+            successThreshold: 1
+            timeoutSeconds: 2
+          resources:
+            requests:
+              cpu: 10m
+              memory: 256Mi
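+# Note: the server Deployment below runs a single replica with a Recreate
+# strategy, presumably because its config and server PVCs are ReadWriteOnce
+# ceph-block volumes that cannot be attached to two pods during a rolling
+# update.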
+---
+# Source: tdarr/charts/tdarr/templates/common.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tdarr-server
+  labels:
+    app.kubernetes.io/controller: server
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/name: tdarr
+    helm.sh/chart: tdarr-4.4.0
+  namespace: tdarr
+spec:
+  revisionHistoryLimit: 3
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app.kubernetes.io/controller: server
+      app.kubernetes.io/name: tdarr
+      app.kubernetes.io/instance: tdarr
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/controller: server
+        app.kubernetes.io/instance: tdarr
+        app.kubernetes.io/name: tdarr
+    spec:
+      enableServiceLinks: false
+      serviceAccountName: default
+      automountServiceAccountToken: true
+      hostIPC: false
+      hostNetwork: false
+      hostPID: false
+      dnsPolicy: ClusterFirst
+      containers:
+        - env:
+            - name: TZ
+              value: US/Central
+            - name: PUID
+              value: "1001"
+            - name: PGID
+              value: "1001"
+            - name: UMASK_SET
+              value: "002"
+            - name: ffmpegVersion
+              value: "6"
+            - name: internalNode
+              value: "false"
+            - name: inContainer
+              value: "true"
+            - name: nodeName
+              value: tdarr-server
+            - name: serverIP
+              value: 0.0.0.0
+            - name: serverPort
+              value: "8266"
+            - name: webUIPort
+              value: "8265"
+          image: ghcr.io/haveagitgat/tdarr:2.58.02
+          imagePullPolicy: IfNotPresent
+          name: main
+          resources:
+            requests:
+              cpu: 200m
+              memory: 1Gi
+          volumeMounts:
+            - mountPath: /app/configs
+              name: config
+            - mountPath: /mnt/store
+              name: media
+              readOnly: true
+            - mountPath: /app/server
+              name: server
+            - mountPath: /tcache
+              name: server-cache
+      volumes:
+        - name: config
+          persistentVolumeClaim:
+            claimName: tdarr-config
+        - name: media
+          persistentVolumeClaim:
+            claimName: tdarr-nfs-storage
+        - name: server
+          persistentVolumeClaim:
+            claimName: tdarr-server
+        - emptyDir: {}
+          name: server-cache
+---
+# Source: tdarr/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: tdarr-config-backup-secret
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: tdarr-config-backup-secret
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  target:
+    template:
+      mergePolicy: Merge
+      engineVersion: v2
+      data:
+        RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-config"
+  data:
+    - secretKey: BUCKET_ENDPOINT
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/volsync/restic/config
+        metadataPolicy: None
+        property: S3_BUCKET_ENDPOINT
+    - secretKey: RESTIC_PASSWORD
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/volsync/restic/config
+        metadataPolicy: None
+        property: RESTIC_PASSWORD
+    - secretKey: AWS_DEFAULT_REGION
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/volsync/restic/config
+        metadataPolicy: None
+        property: AWS_DEFAULT_REGION
+    - secretKey: AWS_ACCESS_KEY_ID
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /digital-ocean/home-infra/volsync-backups
+        metadataPolicy: None
+        property: access_key
+    - secretKey: AWS_SECRET_ACCESS_KEY
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /digital-ocean/home-infra/volsync-backups
+        metadataPolicy: None
+        property: secret_key
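+# Note: target.template composes RESTIC_REPOSITORY from the S3_BUCKET_ENDPOINT
+# value fetched out of vault; with a bucket endpoint of s3.example.com
+# (hypothetical), the secret above would resolve to
+# "s3.example.com/tdarr/tdarr-config". mergePolicy: Merge keeps the fetched
+# credentials, region, and restic password alongside the templated key.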
+---
+# Source: tdarr/templates/external-secret.yaml
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+  name: tdarr-server-backup-secret
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: tdarr-server-backup-secret
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault
+  target:
+    template:
+      mergePolicy: Merge
+      engineVersion: v2
+      data:
+        RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-server"
+  data:
+    - secretKey: BUCKET_ENDPOINT
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/volsync/restic/config
+        metadataPolicy: None
+        property: S3_BUCKET_ENDPOINT
+    - secretKey: RESTIC_PASSWORD
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/volsync/restic/config
+        metadataPolicy: None
+        property: RESTIC_PASSWORD
+    - secretKey: AWS_DEFAULT_REGION
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /cl01tl/volsync/restic/config
+        metadataPolicy: None
+        property: AWS_DEFAULT_REGION
+    - secretKey: AWS_ACCESS_KEY_ID
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /digital-ocean/home-infra/volsync-backups
+        metadataPolicy: None
+        property: access_key
+    - secretKey: AWS_SECRET_ACCESS_KEY
+      remoteRef:
+        conversionStrategy: Default
+        decodingStrategy: None
+        key: /digital-ocean/home-infra/volsync-backups
+        metadataPolicy: None
+        property: secret_key
+---
+# Source: tdarr/templates/http-route.yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: http-route-tdarr
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: http-route-tdarr
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  parentRefs:
+    - group: gateway.networking.k8s.io
+      kind: Gateway
+      name: traefik-gateway
+      namespace: traefik
+  hostnames:
+    - tdarr.alexlebens.net
+  rules:
+    - matches:
+        - path:
+            type: PathPrefix
+            value: /
+      backendRefs:
+        - group: ''
+          kind: Service
+          name: tdarr-web
+          port: 8265
+          weight: 100
+---
+# Source: tdarr/templates/replication-source.yaml
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+  name: tdarr-config-backup-source
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: tdarr-config-backup-source
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  sourcePVC: tdarr-config
+  trigger:
+    schedule: 0 4 * * *
+  restic:
+    pruneIntervalDays: 7
+    repository: tdarr-config-backup-secret
+    retain:
+      hourly: 1
+      daily: 3
+      weekly: 2
+      monthly: 2
+      yearly: 4
+    copyMethod: Snapshot
+    storageClassName: ceph-block
+    volumeSnapshotClassName: ceph-blockpool-snapshot
+---
+# Source: tdarr/templates/replication-source.yaml
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+  name: tdarr-server-backup-source
+  namespace: tdarr
+  labels:
+    app.kubernetes.io/name: tdarr-server-backup-source
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/part-of: tdarr
+spec:
+  sourcePVC: tdarr-server
+  trigger:
+    schedule: 0 4 * * *
+  restic:
+    pruneIntervalDays: 7
+    repository: tdarr-server-backup-secret
+    retain:
+      hourly: 1
+      daily: 3
+      weekly: 2
+      monthly: 2
+      yearly: 4
+    copyMethod: Snapshot
+    storageClassName: ceph-block
+    volumeSnapshotClassName: ceph-blockpool-snapshot
+---
+# Source: tdarr/charts/tdarr-exporter/templates/servicemonitor.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    helm.sh/chart: tdarr-exporter-1.1.7
+    app.kubernetes.io/name: tdarr-exporter
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/version: "1.4.3"
+    app.kubernetes.io/managed-by: Helm
+  name: tdarr-tdarr-exporter
+spec:
+  endpoints:
+    - interval: 1m
+      path: /metrics
+      port: metrics
+      scrapeTimeout: 15s
+  namespaceSelector:
+    matchNames:
+      - tdarr
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: tdarr-exporter
+      app.kubernetes.io/instance: tdarr
+---
+# Source: tdarr/charts/tdarr-exporter/templates/tests/test-connection.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "tdarr-tdarr-exporter-test-connection"
+  labels:
+    helm.sh/chart: tdarr-exporter-1.1.7
+    app.kubernetes.io/name: tdarr-exporter
+    app.kubernetes.io/instance: tdarr
+    app.kubernetes.io/version: "1.4.3"
+    app.kubernetes.io/managed-by: Helm
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: "docker.io/busybox:1.36.1"
+      command: ['wget']
+      args: ['tdarr-tdarr-exporter:9090/healthz']
+  restartPolicy: Never
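+# Note: only backup sources are rendered here. A minimal volsync restore
+# sketch, assuming the same repository secret and an existing target PVC
+# (this resource is hypothetical and not rendered by the chart):
+#
+#   apiVersion: volsync.backube/v1alpha1
+#   kind: ReplicationDestination
+#   metadata:
+#     name: tdarr-config-restore
+#     namespace: tdarr
+#   spec:
+#     trigger:
+#       manual: restore-once
+#     restic:
+#       repository: tdarr-config-backup-secret
+#       destinationPVC: tdarr-config
+#       copyMethod: Direct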