remove n8n (#2887)

Reviewed-on: #2887
This commit was merged in pull request #2887.
commit 8ca5cead5e
parent 608674835e
2025-12-27 18:44:22 +00:00
29 changed files with 0 additions and 1429 deletions

@@ -1,66 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: n8n-postgresql-18-cluster
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:18.1-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "n8n-postgresql-18-backup-garage-local"
serverName: "n8n-postgresql-18-backup-1"
bootstrap:
recovery:
database: app
source: n8n-postgresql-18-backup-1
externalClusters:
- name: n8n-postgresql-18-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "n8n-postgresql-18-recovery"
serverName: n8n-postgresql-18-backup-1

@@ -1,118 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: n8n-webhook
labels:
app.kubernetes.io/controller: webhook
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: webhook
app.kubernetes.io/name: n8n
app.kubernetes.io/instance: n8n
template:
metadata:
annotations:
labels:
app.kubernetes.io/controller: webhook
app.kubernetes.io/instance: n8n
app.kubernetes.io/name: n8n
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/arch: amd64
containers:
- args:
- webhook
command:
- n8n
env:
- name: GENERIC_TIMEZONE
value: US/Central
- name: DB_TYPE
value: postgresdb
- name: DB_POSTGRESDB_DATABASE
valueFrom:
secretKeyRef:
key: dbname
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_HOST
valueFrom:
secretKeyRef:
key: host
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_PORT
valueFrom:
secretKeyRef:
key: port
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_USER
valueFrom:
secretKeyRef:
key: user
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: n8n-postgresql-18-cluster-app
- name: N8N_METRICS
value: "true"
- name: QUEUE_HEALTH_CHECK_ACTIVE
value: "true"
- name: EXECUTIONS_MODE
value: queue
- name: QUEUE_BULL_REDIS_HOST
value: redis-replication-n8n-master.n8n
- name: N8N_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
key: key
name: n8n-config-secret
- name: WEBHOOK_URL
value: https://n8n.alexlebens.net/
- name: N8N_DIAGNOSTICS_ENABLED
value: "false"
- name: N8N_VERSION_NOTIFICATIONS_ENABLED
value: "false"
image: ghcr.io/n8n-io/n8n:2.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 5678
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: main
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz/readiness
port: 5678
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /home/node/.n8n
name: cache
volumes:
- emptyDir: {}
name: cache

@@ -1,104 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: n8n-worker
labels:
app.kubernetes.io/controller: worker
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: worker
app.kubernetes.io/name: n8n
app.kubernetes.io/instance: n8n
template:
metadata:
annotations:
labels:
app.kubernetes.io/controller: worker
app.kubernetes.io/instance: n8n
app.kubernetes.io/name: n8n
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/arch: amd64
containers:
- args:
- worker
command:
- n8n
env:
- name: GENERIC_TIMEZONE
value: US/Central
- name: DB_TYPE
value: postgresdb
- name: DB_POSTGRESDB_DATABASE
valueFrom:
secretKeyRef:
key: dbname
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_HOST
valueFrom:
secretKeyRef:
key: host
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_PORT
valueFrom:
secretKeyRef:
key: port
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_USER
valueFrom:
secretKeyRef:
key: user
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: n8n-postgresql-18-cluster-app
- name: N8N_METRICS
value: "true"
- name: N8N_RUNNERS_ENABLED
value: "true"
- name: N8N_BLOCK_ENV_ACCESS_IN_NODE
value: "true"
- name: N8N_GIT_NODE_DISABLE_BARE_REPOS
value: "true"
- name: QUEUE_HEALTH_CHECK_ACTIVE
value: "true"
- name: EXECUTIONS_MODE
value: queue
- name: QUEUE_BULL_REDIS_HOST
value: redis-replication-n8n-master.n8n
- name: N8N_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
key: key
name: n8n-config-secret
- name: WEBHOOK_URL
value: https://n8n.alexlebens.net/
image: ghcr.io/n8n-io/n8n:2.2.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /home/node/.n8n
name: cache
volumes:
- emptyDir: {}
name: cache

@@ -1,111 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: n8n-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: n8n
app.kubernetes.io/instance: n8n
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: n8n
app.kubernetes.io/name: n8n
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: GENERIC_TIMEZONE
value: US/Central
- name: DB_TYPE
value: postgresdb
- name: DB_POSTGRESDB_DATABASE
valueFrom:
secretKeyRef:
key: dbname
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_HOST
valueFrom:
secretKeyRef:
key: host
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_PORT
valueFrom:
secretKeyRef:
key: port
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_USER
valueFrom:
secretKeyRef:
key: user
name: n8n-postgresql-18-cluster-app
- name: DB_POSTGRESDB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: n8n-postgresql-18-cluster-app
- name: N8N_METRICS
value: "true"
- name: QUEUE_HEALTH_CHECK_ACTIVE
value: "true"
- name: EXECUTIONS_MODE
value: queue
- name: QUEUE_BULL_REDIS_HOST
value: redis-replication-n8n-master.n8n
- name: N8N_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
key: key
name: n8n-config-secret
- name: WEBHOOK_URL
value: https://n8n.alexlebens.net/
image: ghcr.io/n8n-io/n8n:2.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 5678
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: main
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz/readiness
port: 5678
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: n8n

@@ -1,58 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: n8n-backup-secret-external
namespace: n8n
labels:
helm.sh/chart: volsync-target-data-0.6.0
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "0.6.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-backup-secret-external
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/n8n/n8n"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY

@@ -1,58 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: n8n-backup-secret-local
namespace: n8n
labels:
helm.sh/chart: volsync-target-data-0.6.0
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "0.6.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-backup-secret-local
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/n8n/n8n"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY

@@ -1,58 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: n8n-backup-secret-remote
namespace: n8n
labels:
helm.sh/chart: volsync-target-data-0.6.0
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "0.6.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-backup-secret-remote
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/n8n/n8n"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY

@@ -1,21 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: n8n-config-secret
namespace: n8n
labels:
app.kubernetes.io/name: n8n-config-secret
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/n8n/config
metadataPolicy: None
property: key

@@ -1,39 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: n8n-postgresql-18-backup-garage-local-secret
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-postgresql-18-backup-garage-local-secret
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY

@@ -1,39 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: n8n-postgresql-18-recovery-secret
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-postgresql-18-recovery-secret
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY

@@ -1,50 +0,0 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: n8n
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- "n8n.alexlebens.net"
rules:
- backendRefs:
- group: ""
kind: Service
name: n8n-main
namespace: n8n
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
- path:
type: PathPrefix
value: /webhook-test/
- backendRefs:
- group: ""
kind: Service
name: n8n-webhook
namespace: n8n
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /webhook/
- path:
type: PathPrefix
value: /webhook-waiting/
- path:
type: PathPrefix
value: /form/

@@ -1,28 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: n8n-postgresql-18-backup-garage-local
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-postgresql-18-backup-garage-local
spec:
retentionPolicy: 7d
configuration:
destinationPath: s3://postgres-backups/cl01tl/n8n/n8n-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: n8n-postgresql-18-backup-garage-local-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: n8n-postgresql-18-backup-garage-local-secret
key: ACCESS_SECRET_KEY
region:
name: n8n-postgresql-18-backup-garage-local-secret
key: ACCESS_REGION

@@ -1,33 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "n8n-postgresql-18-recovery"
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: "n8n-postgresql-18-recovery"
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/n8n/n8n-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: n8n-postgresql-18-recovery-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: n8n-postgresql-18-recovery-secret
key: ACCESS_SECRET_KEY
region:
name: n8n-postgresql-18-recovery-secret
key: ACCESS_REGION

@@ -1,17 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: n8n
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"

@@ -1,270 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: n8n-postgresql-18-alert-rules
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/n8n-postgresql-18
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="n8n"} - cnpg_pg_replication_is_wal_receiver_up{namespace="n8n"}) < 1
for: 5m
labels:
severity: critical
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster has fewer than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="n8n"} - cnpg_pg_replication_is_wal_receiver_up{namespace="n8n"}) < 2
for: 5m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="n8n", pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="n8n", pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="n8n", pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="n8n", pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="n8n",pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="n8n", pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="n8n", persistentvolumeclaim=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="n8n",pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag{namespace="n8n"} > 300
for: 1m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "n8n/n8n-postgresql-18-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="n8n", pod=~"n8n-postgresql-18-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: n8n
cnpg_cluster: n8n-postgresql-18-cluster

@@ -1,36 +0,0 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-n8n
namespace: n8n
labels:
helm.sh/chart: redis-replication-0.5.0
app.kubernetes.io/version: "0.5.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis-replication-n8n
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
spec:
clusterSize: 3
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
kubernetesConfig:
image: "quay.io/opstree/redis:v8.4.0"
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 10m
memory: 32Mi
storage:
volumeClaimTemplate:
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: ceph-block
redisExporter:
enabled: true
image: "quay.io/opstree/redis-exporter:v1.80.1"

@@ -1,29 +0,0 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
name: redis-sentinel-n8n
namespace: n8n
labels:
helm.sh/chart: redis-replication-0.5.0
app.kubernetes.io/version: "0.5.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis-sentinel-n8n
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
spec:
clusterSize: 3
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
redisSentinelConfig:
redisReplicationName: redis-replication-n8n
kubernetesConfig:
image: "quay.io/opstree/redis-sentinel:v8.4.0"
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 10m
memory: 32Mi
redisExporter:
enabled: true
image: "quay.io/opstree/redis-exporter:v1.80.1"

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: n8n-backup-source-external
namespace: n8n
labels:
helm.sh/chart: volsync-target-data-0.6.0
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "0.6.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-backup
spec:
sourcePVC: n8n
trigger:
schedule: 0 9 * * *
restic:
pruneIntervalDays: 7
repository: n8n-backup-secret-external
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: n8n-backup-source-local
namespace: n8n
labels:
helm.sh/chart: volsync-target-data-0.6.0
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "0.6.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-backup
spec:
sourcePVC: n8n
trigger:
schedule: 0 8 * * *
restic:
pruneIntervalDays: 7
repository: n8n-backup-secret-local
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: n8n-backup-source-remote
namespace: n8n
labels:
helm.sh/chart: volsync-target-data-0.6.0
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "0.6.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n-backup
spec:
sourcePVC: n8n
trigger:
schedule: 0 10 * * *
restic:
pruneIntervalDays: 7
repository: n8n-backup-secret-remote
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

@@ -1,25 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "n8n-postgresql-18-scheduled-backup-live-backup"
namespace: n8n
labels:
helm.sh/chart: postgres-18-cluster-7.4.4
app.kubernetes.io/name: n8n-postgresql-18
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
app.kubernetes.io/version: "7.4.4"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: "n8n-postgresql-18-scheduled-backup-live-backup"
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: n8n-postgresql-18-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "n8n-postgresql-18-backup-garage-local"

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: n8n-main
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
app.kubernetes.io/service: n8n-main
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5678
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: n8n
app.kubernetes.io/name: n8n

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: n8n-webhook
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
app.kubernetes.io/service: n8n-webhook
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5678
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: webhook
app.kubernetes.io/instance: n8n
app.kubernetes.io/name: n8n

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: n8n-worker
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
app.kubernetes.io/service: n8n-worker
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 5678
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: worker
app.kubernetes.io/instance: n8n
app.kubernetes.io/name: n8n

@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: n8n-main
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
jobLabel: "n8n-main"
namespaceSelector:
matchNames:
- n8n
selector:
matchLabels:
app.kubernetes.io/instance: n8n-main
app.kubernetes.io/name: n8n-main
endpoints:
- interval: 3m
path: /metrics
port: http
scrapeTimeout: 1m

@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: n8n-webhook
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
jobLabel: "n8n-webhook"
namespaceSelector:
matchNames:
- n8n
selector:
matchLabels:
app.kubernetes.io/instance: n8n-webhook
app.kubernetes.io/name: n8n-webhook
endpoints:
- interval: 3m
path: /metrics
port: http
scrapeTimeout: 1m

@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: n8n-worker
labels:
app.kubernetes.io/instance: n8n
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: n8n
helm.sh/chart: n8n-4.5.0
namespace: n8n
spec:
jobLabel: "n8n-worker"
namespaceSelector:
matchNames:
- n8n
selector:
matchLabels:
app.kubernetes.io/instance: n8n-worker
app.kubernetes.io/name: n8n-worker
endpoints:
- interval: 3m
path: /metrics
port: http
scrapeTimeout: 1m

@@ -1,22 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-n8n
namespace: n8n
labels:
helm.sh/chart: redis-replication-0.5.0
app.kubernetes.io/version: "0.5.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis-replication-n8n
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
spec:
selector:
matchLabels:
app: redis-replication-n8n
redis_setup_type: replication
role: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

@@ -1,22 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-sentinel-n8n
namespace: n8n
labels:
helm.sh/chart: redis-replication-0.5.0
app.kubernetes.io/version: "0.5.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis-sentinel-n8n
app.kubernetes.io/instance: n8n
app.kubernetes.io/part-of: n8n
spec:
selector:
matchLabels:
app: redis-sentinel-n8n
redis_setup_type: sentinel
role: sentinel
endpoints:
- port: sentinel-client
interval: 30s
scrapeTimeout: 10s