From 67db71dea7a7918c4c34879579c785030c6e2c44 Mon Sep 17 00:00:00 2001
From: gitea-bot
Date: Fri, 6 Feb 2026 03:58:11 +0000
Subject: [PATCH] chore: Update manifests after change

---
 ...uster-komodo-postgresql-17-fdb-cluster.yaml | 18 +++++++-----------
 ...ql-17-fdb-scheduled-backup-live-backup.yaml |  2 +-
 2 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/clusters/cl01tl/manifests/komodo/Cluster-komodo-postgresql-17-fdb-cluster.yaml b/clusters/cl01tl/manifests/komodo/Cluster-komodo-postgresql-17-fdb-cluster.yaml
index 64b92e04c..94ce3ff8f 100644
--- a/clusters/cl01tl/manifests/komodo/Cluster-komodo-postgresql-17-fdb-cluster.yaml
+++ b/clusters/cl01tl/manifests/komodo/Cluster-komodo-postgresql-17-fdb-cluster.yaml
@@ -68,15 +68,11 @@ spec:
         barmanObjectName: "komodo-postgresql-17-fdb-backup-garage-local"
         serverName: "komodo-postgresql-17-fdb-backup-2"
   bootstrap:
-    recovery:
-      database: app
-      source: komodo-postgresql-17-fdb-backup-1
+    initdb:
+      database: ferretDB
+      owner: ferret
+      postInitApplicationSQL:
+        - create extension if not exists pg_cron;
+        - create extension if not exists documentdb cascade;
+        - grant documentdb_admin_role to ferret;
   externalClusters:
-    - name: komodo-postgresql-17-fdb-backup-1
-      plugin:
-        name: barman-cloud.cloudnative-pg.io
-        enabled: true
-        isWALArchiver: false
-        parameters:
-          barmanObjectName: "komodo-postgresql-17-fdb-recovery"
-          serverName: komodo-postgresql-17-fdb-backup-1
diff --git a/clusters/cl01tl/manifests/komodo/ScheduledBackup-komodo-postgresql-17-fdb-scheduled-backup-live-backup.yaml b/clusters/cl01tl/manifests/komodo/ScheduledBackup-komodo-postgresql-17-fdb-scheduled-backup-live-backup.yaml
index 4ba72f907..a20bc320e 100644
--- a/clusters/cl01tl/manifests/komodo/ScheduledBackup-komodo-postgresql-17-fdb-scheduled-backup-live-backup.yaml
+++ b/clusters/cl01tl/manifests/komodo/ScheduledBackup-komodo-postgresql-17-fdb-scheduled-backup-live-backup.yaml
@@ -13,7 +13,7 @@ metadata:
     app.kubernetes.io/name: "komodo-postgresql-17-fdb-scheduled-backup-live-backup"
 spec:
   immediate: true
-  suspend: true
+  suspend: false
   schedule: "0 0 0 * * *"
   backupOwnerReference: self
   cluster: