Compare commits

56 Commits (ec425df898...main):

d604b53054, f071e3444a, 31c29d5256, 4640b4ad27, 6152f69f06, 397963a73f,
2334ada792, 2b5bd9bd36, 931d9b2cfa, 24aef86291, 6bdb3fbe51, 06d06c1d77,
1c471942ed, 5d080da03e, a9e645c003, 732bf455a2, 777181a7ff, 6e7897b002,
baa67506f8, 7821ec65a0, f59710b8d0, 1e63174d43, 72017223f3, beee80e9b5,
f1fd181459, e97c28e0fb, 3f8f04a711, c68534b9e7, 1ddcd888a4, d0df6c5c67,
437ba24efb, 40250eaf31, 15faf1d9de, 66aa7382bc, 25e4a6e532, 3a4542ebd4,
067047ceb0, 38db6bcdaf, 9fa5d7dc00, b3fcfcfa09, 1710861eb3, 70549110c0,
93b98ea90d, f45a4e4d8e, 57619c277d, 7a9275a49e, 48012b885b, b8a7c19e0a,
d043dc80be, e83fcdfe29, fba19fe073, 4216bad619, 03f0d37cd6, 87f21b0e03,
988fed6179, 773c75be9b
```diff
@@ -13,7 +13,7 @@ on:
 jobs:
   renovate:
     runs-on: ubuntu-js
-    container: ghcr.io/renovatebot/renovate:43.143.1@sha256:2dd226666c8ef3413813e67ff6b34d40512997e99af896e51fe2b24eebf5150b
+    container: ghcr.io/renovatebot/renovate:43.146.0@sha256:e49415ea6907adcd033abff4ea8283bd78d8f5d9aeee7b148a10127463e548a8
     steps:
       - name: Checkout
         uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
```
clusters/cl01tl/helm/argocd/templates/prometheus-rule.yaml (new file, +108 lines)

```diff
@@ -0,0 +1,108 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: haproxy
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: haproxy
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: EmbeddedExporter
+      rules:
+        - alert: HAProxyHighHTTP4xxErrorRateBackend
+          expr: ((sum by (proxy) (rate(haproxy_server_http_responses_total{code="4xx"}[1m])) / sum by (proxy) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (proxy) (rate(haproxy_server_http_responses_total[1m])) > 0
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy high HTTP 4xx error rate backend (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many HTTP requests with status 4xx (> 5%) on backend {{ `{{ $labels.proxy }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyHighHTTP5xxErrorRateBackend
+          expr: ((sum by (proxy) (rate(haproxy_server_http_responses_total{code="5xx"}[1m])) / sum by (proxy) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (proxy) (rate(haproxy_server_http_responses_total[1m])) > 0
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy high HTTP 5xx error rate backend (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many HTTP requests with status 5xx (> 5%) on backend {{ `{{ $labels.proxy }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyHighHTTP4xxErrorRateServer
+          expr: ((sum by (server) (rate(haproxy_server_http_responses_total{code="4xx"}[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy high HTTP 4xx error rate server (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many HTTP requests with status 4xx (> 5%) on server {{ `{{ $labels.server }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyHighHTTP5xxErrorRateServer
+          expr: ((sum by (server) (rate(haproxy_server_http_responses_total{code="5xx"}[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy high HTTP 5xx error rate server (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many HTTP requests with status 5xx (> 5%) on server {{ `{{ $labels.server }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyServerResponseErrors
+          expr: (sum by (server) (rate(haproxy_server_response_errors_total[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100 > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy server response errors (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many response errors to {{ `{{ $labels.server }}` }} server (> 5%).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyBackendConnectionErrors
+          expr: (sum by (proxy) (rate(haproxy_backend_connection_errors_total[1m]))) > 100
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy backend connection errors (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many connection errors to {{ `{{ $labels.proxy }}` }} backend (> 100 req/s). Request throughput may be too high.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyServerConnectionErrors
+          expr: (sum by (proxy) (rate(haproxy_server_connection_errors_total[1m]))) > 100
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: HAProxy server connection errors (instance {{ `{{ $labels.instance }}` }})
+            description: "Too many connection errors to {{ `{{ $labels.proxy }}` }} (> 100 req/s). Request throughput may be too high.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyBackendMaxActiveSession>80%
+          expr: (haproxy_backend_current_sessions / haproxy_backend_limit_sessions * 100) > 80 and haproxy_backend_limit_sessions > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: HAProxy backend max active session > 80% (instance {{ `{{ $labels.instance }}` }})
+            description: "Session limit from backend {{ `{{ $labels.proxy }}` }} reached 80% of limit - {{ `{{ $value | printf \"%.2f\"}}` }}%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyPendingRequests
+          expr: sum by (proxy) (haproxy_backend_current_queue) > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: HAProxy pending requests (instance {{ `{{ $labels.instance }}` }})
+            description: "Some HAProxy requests are pending on {{ `{{ $labels.proxy }}` }} - {{ `{{ $value | printf \"%.2f\"}}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyRetryHigh
+          expr: sum by (proxy) (rate(haproxy_backend_retry_warnings_total[1m])) > 10
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: HAProxy retry high (instance {{ `{{ $labels.instance }}` }})
+            description: "High rate of retry on {{ `{{ $labels.proxy }}` }} - {{ `{{ $value | printf \"%.2f\"}}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyFrontendSecurityBlockedRequests
+          expr: sum by (proxy) (rate(haproxy_frontend_denied_connections_total[2m])) > 10
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: HAProxy frontend security blocked requests (instance {{ `{{ $labels.instance }}` }})
+            description: "HAProxy is blocking requests for security reason\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: HAProxyServerHealthcheckFailure
+          expr: increase(haproxy_server_check_failures_total[1m]) > 2
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: HAProxy server healthcheck failure (instance {{ `{{ $labels.instance }}` }})
+            description: "Some server healthcheck are failing on {{ `{{ $labels.server }}` }} ({{ `{{ $value }}` }} in the last 1m)\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
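These expressions only fail at rule-evaluation time, so a local lint before pushing is cheap insurance. A minimal sketch, assuming the chart renders standalone and that `yq` and `promtool` are installed (the path is taken from the file header above):

```sh
# Render just the new rule file, strip the PrometheusRule wrapper down to its
# rule groups (promtool expects a bare `groups:` document), then lint it.
helm template clusters/cl01tl/helm/argocd \
  --show-only templates/prometheus-rule.yaml \
  | yq '.spec' > /tmp/haproxy-rules.yaml
promtool check rules /tmp/haproxy-rules.yaml
```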
```diff
@@ -103,7 +103,7 @@ argo-cd:
     enabled: true
     image:
       repository: haproxy
-      tag: 3.3.6-alpine@sha256:4f97a2cb7f02fd08402259e74a65ef12fcfa3dff1ef78fddecb5228a17b7f4ad
+      tag: 3.3.7-alpine@sha256:2afa53c856e4e9fcc7dfb35b807fcb189896d7e62b38d363f9bedea92bce7f9a
     resources:
       requests:
         cpu: 5m
```
```diff
@@ -32,4 +32,4 @@ dependencies:
     repository: oci://harbor.alexlebens.net/helm-charts
 icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/audiobookshelf.png
 # renovate: datasource=github-releases depName=advplyr/audiobookshelf
-appVersion: 2.33.2
+appVersion: 2.34.0
```
```diff
@@ -12,7 +12,7 @@ audiobookshelf:
     main:
       image:
         repository: ghcr.io/advplyr/audiobookshelf
-        tag: 2.33.2@sha256:a44ed89b3e845faa1f7d353f2cc89b2fcd8011737dd14075fa963cf9468da3a5
+        tag: 2.34.0@sha256:4143292c530f6ac6700afd13360c04f477e4f1a81c1c97c4224b1c7e4330c5c4
       env:
         - name: TZ
           value: America/Chicago
```
```diff
@@ -0,0 +1,44 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: cert-manager
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: cert-manager
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: EmbeddedExporter
+      rules:
+        - alert: Cert-ManagerAbsent
+          expr: absent(up{job="cert-manager"})
+          for: 10m
+          labels:
+            severity: critical
+          annotations:
+            summary: Cert-Manager absent (instance {{ `{{ $labels.instance }}` }})
+            description: "Cert-Manager has disappeared from Prometheus service discovery. New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: Cert-ManagerCertificateExpiringSoon
+          expr: avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600)
+          for: 1h
+          labels:
+            severity: warning
+          annotations:
+            summary: Cert-Manager certificate expiring soon (instance {{ `{{ $labels.instance }}` }})
+            description: "The certificate {{ `{{ $labels.name }}` }} is expiring in less than 21 days.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: Cert-ManagerCertificateNotReady
+          expr: max by (name, exported_namespace, namespace, condition) (certmanager_certificate_ready_status{condition!="True"} == 1)
+          for: 10m
+          labels:
+            severity: critical
+          annotations:
+            summary: Cert-Manager certificate not ready (instance {{ `{{ $labels.instance }}` }})
+            description: "The certificate {{ `{{ $labels.name }}` }} in namespace {{ `{{ $labels.exported_namespace }}` }} is not ready to serve traffic.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: Cert-ManagerHittingACMERateLimits
+          expr: sum by (host) (rate(certmanager_acme_client_request_count{status="429"}[5m])) > 0
+          for: 5m
+          labels:
+            severity: critical
+          annotations:
+            summary: Cert-Manager hitting ACME rate limits (instance {{ `{{ $labels.instance }}` }})
+            description: "Cert-Manager is being rate-limited by the ACME provider. Certificate issuance and renewal may be blocked for up to a week.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
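For scale: the `(21 * 24 * 3600)` threshold in `Cert-ManagerCertificateExpiringSoon` is 1,814,400 seconds, so the warning fires once a certificate has less than 21 days of validity left. That is past cert-manager's default renewal point (30 days before expiry for a 90-day certificate), meaning renewal has already been failing for roughly 9 days by the time it fires.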
```diff
@@ -18,7 +18,7 @@ rclone-postgres-backups-remote:
   nameOverride: postgres-backups-remote-rclone
   cronJob:
     suspend: false
-    schedule: 0 1 * * *
+    schedule: 0 6 * * 6
   rclone:
     source:
       bucketName: postgres-backups
```
```diff
@@ -45,7 +45,7 @@ rclone-postgres-backups-external:
   nameOverride: postgres-backups-external-rclone
   cronJob:
     suspend: true
-    schedule: 20 1 * * *
+    schedule: 0 6 * * 6
   rclone:
     source:
       bucketName: openbao-backups
```
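Cron fields read minute, hour, day-of-month, month, day-of-week, so both backup jobs move from staggered nightly runs (01:00 and 01:20 daily) to `0 6 * * 6`, a single run at 06:00 every Saturday (day-of-week 6).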
```diff
@@ -42,4 +42,4 @@ dependencies:
     repository: oci://harbor.alexlebens.net/helm-charts
 icon: https://cdn.jsdelivr.net/gh/selfhst/icons@main/png/dawarich.png
 # renovate: datasource=github-releases depName=Freika/dawarich
-appVersion: 1.6.1
+appVersion: 1.7.0
```
```diff
@@ -15,6 +15,18 @@ spec:
       remoteRef:
         key: /cl01tl/dawarich/key
         property: key
+    - secretKey: otp-primary-key
+      remoteRef:
+        key: /cl01tl/dawarich/key
+        property: otp-primary-key
+    - secretKey: otp-deterministic-key
+      remoteRef:
+        key: /cl01tl/dawarich/key
+        property: otp-deterministic-key
+    - secretKey: otp-derivation-salt
+      remoteRef:
+        key: /cl01tl/dawarich/key
+        property: otp-derivation-salt
 
 ---
 apiVersion: external-secrets.io/v1
```
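Dawarich is a Rails application, and the three new keys mirror Rails Active Record encryption's key set, so one plausible way to produce values for the secret store is Rails' own generator. A hypothetical sketch (the store layout below is an assumption; only the key names come from the manifest above):

```sh
# From a checkout of the app (Rails 7+ ships this task), print a fresh key set:
bin/rails db:encryption:init
# Store the printed primary_key, deterministic_key, and key_derivation_salt
# under /cl01tl/dawarich/key as otp-primary-key, otp-deterministic-key, and
# otp-derivation-salt so the ExternalSecret above can resolve them.
```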
```diff
@@ -8,7 +8,7 @@ dawarich:
     main:
       image:
         repository: freikin/dawarich
-        tag: 1.6.1@sha256:a884f69f19ce0f66992f3872d24544d1e587e133b8a003e072711aafc1e02429
+        tag: 1.7.0@sha256:7d5f99c61121fcfa4cbdd6a153392630d9f059ffb0156759278d3e049085ec62
       command:
         - "web-entrypoint.sh"
       args:
```
```diff
@@ -83,6 +83,21 @@ dawarich:
             secretKeyRef:
               name: dawarich-key
               key: key
+        - name: OTP_ENCRYPTION_PRIMARY_KEY
+          valueFrom:
+            secretKeyRef:
+              name: dawarich-key
+              key: otp-primary-key
+        - name: OTP_ENCRYPTION_DETERMINISTIC_KEY
+          valueFrom:
+            secretKeyRef:
+              name: dawarich-key
+              key: otp-deterministic-key
+        - name: OTP_ENCRYPTION_KEY_DERIVATION_SALT
+          valueFrom:
+            secretKeyRef:
+              name: dawarich-key
+              key: otp-derivation-salt
         - name: RAILS_LOG_TO_STDOUT
           value: true
         - name: SELF_HOSTED
```
```diff
@@ -111,7 +126,7 @@ dawarich:
     sidekiq:
       image:
         repository: freikin/dawarich
-        tag: 1.6.1@sha256:a884f69f19ce0f66992f3872d24544d1e587e133b8a003e072711aafc1e02429
+        tag: 1.7.0@sha256:7d5f99c61121fcfa4cbdd6a153392630d9f059ffb0156759278d3e049085ec62
       command:
         - "sidekiq-entrypoint.sh"
       args:
```
```diff
@@ -161,12 +176,12 @@ dawarich:
         - name: OIDC_CLIENT_ID
           valueFrom:
             secretKeyRef:
-              name: dawarich-oidc-secret
+              name: dawarich-oidc-authentik
               key: client
         - name: OIDC_CLIENT_SECRET
           valueFrom:
             secretKeyRef:
-              name: dawarich-oidc-secret
+              name: dawarich-oidc-authentik
               key: secret
         - name: OIDC_PROVIDER_NAME
           value: Authentik
```
```diff
@@ -181,8 +196,23 @@ dawarich:
         - name: SECRET_KEY_BASE
           valueFrom:
             secretKeyRef:
-              name: dawarich-key-secret
+              name: dawarich-key
               key: key
+        - name: OTP_ENCRYPTION_PRIMARY_KEY
+          valueFrom:
+            secretKeyRef:
+              name: dawarich-key
+              key: otp-primary-key
+        - name: OTP_ENCRYPTION_DETERMINISTIC_KEY
+          valueFrom:
+            secretKeyRef:
+              name: dawarich-key
+              key: otp-deterministic-key
+        - name: OTP_ENCRYPTION_KEY_DERIVATION_SALT
+          valueFrom:
+            secretKeyRef:
+              name: dawarich-key
+              key: otp-derivation-salt
         - name: RAILS_LOG_TO_STDOUT
           value: true
         - name: SELF_HOSTED
```
```diff
@@ -47,6 +47,8 @@ democratic-csi:
     fsType: ext4
   node:
     hostPID: true
+    rbac:
+      enabled: true
   driver:
     extraEnv:
       - name: ISCSIADM_HOST_STRATEGY
```
clusters/cl01tl/helm/gitea/templates/prometheus-rule.yaml (new file, +28 lines)

```diff
@@ -0,0 +1,28 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: meilisearch
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: meilisearch
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: EmbeddedExporter
+      rules:
+        - alert: MeilisearchIndexIsEmpty
+          expr: meilisearch_index_docs_count == 0
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Meilisearch index is empty (instance {{ `{{ $labels.instance }}` }})
+            description: "Meilisearch index {{ `{{ $labels.index }}` }} has zero documents\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: MeilisearchHttpResponseTime
+          expr: meilisearch_http_response_time_seconds > 0.5
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Meilisearch http response time (instance {{ `{{ $labels.instance }}` }})
+            description: "Meilisearch http response time is too high\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
clusters/cl01tl/helm/jellyfin/templates/prometheus-rule.yaml (new file, +28 lines)

```diff
@@ -0,0 +1,28 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: meilisearch
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: meilisearch
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: EmbeddedExporter
+      rules:
+        - alert: MeilisearchIndexIsEmpty
+          expr: meilisearch_index_docs_count == 0
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Meilisearch index is empty (instance {{ `{{ $labels.instance }}` }})
+            description: "Meilisearch index {{ `{{ $labels.index }}` }} has zero documents\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: MeilisearchHttpResponseTime
+          expr: meilisearch_http_response_time_seconds > 0.5
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Meilisearch http response time (instance {{ `{{ $labels.instance }}` }})
+            description: "Meilisearch http response time is too high\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
clusters/cl01tl/helm/karakeep/templates/prometheus-rule.yaml (new file, +28 lines)

```diff
@@ -0,0 +1,28 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: meilisearch
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: meilisearch
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: EmbeddedExporter
+      rules:
+        - alert: MeilisearchIndexIsEmpty
+          expr: meilisearch_index_docs_count == 0
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Meilisearch index is empty (instance {{ `{{ $labels.instance }}` }})
+            description: "Meilisearch index {{ `{{ $labels.index }}` }} has zero documents\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: MeilisearchHttpResponseTime
+          expr: meilisearch_http_response_time_seconds > 0.5
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Meilisearch http response time (instance {{ `{{ $labels.instance }}` }})
+            description: "Meilisearch http response time is too high\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
```diff
@@ -1,7 +1,7 @@
 dependencies:
   - name: kube-prometheus-stack
     repository: oci://ghcr.io/prometheus-community/charts
-    version: 84.2.1
+    version: 84.3.0
   - name: prometheus-operator-crds
     repository: oci://ghcr.io/prometheus-community/charts
     version: 28.0.1
```
```diff
@@ -11,5 +11,5 @@ dependencies:
   - name: valkey
     repository: oci://harbor.alexlebens.net/helm-charts
     version: 0.6.1
-digest: sha256:4d3525495bf1c15bf3508fc9948fa46b2194b8e0439f4aeaf27bf855e9964f65
-generated: "2026-04-27T16:01:33.493574911Z"
+digest: sha256:88beedf9486adb9cb27b36c24021759401fcff106fc0e0cadbb3282d7e57d03c
+generated: "2026-04-27T19:03:58.288039768Z"
```
```diff
@@ -20,7 +20,7 @@ maintainers:
   - name: alexlebens
 dependencies:
   - name: kube-prometheus-stack
-    version: 84.2.1
+    version: 84.3.0
     repository: oci://ghcr.io/prometheus-community/charts
   - name: prometheus-operator-crds
     version: 28.0.1
```
```diff
@@ -98,8 +98,8 @@ kube-prometheus-stack:
       namespace: traefik
     prometheusSpec:
       scrapeInterval: 30s
-      retention: 45d
-      retentionSize: 240GiB
+      retention: 60d
+      retentionSize: 450GiB
       externalUrl: https://prometheus.alexlebens.net
       ruleSelectorNilUsesHelmValues: false
      serviceMonitorSelectorNilUsesHelmValues: false
```
```diff
@@ -112,7 +112,7 @@ kube-prometheus-stack:
           accessModes: ["ReadWriteOnce"]
           resources:
             requests:
-              storage: 250Gi
+              storage: 500Gi
 ntfy-alertmanager:
   global:
     fullnameOverride: ntfy-alertmanager
```
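The two Prometheus knobs move together here: `retentionSize: 450GiB` stays below the enlarged 500Gi volume request, which matters because size-based retention can only delete whole persisted blocks, so the WAL and the newest head block need the remaining headroom.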
```diff
@@ -120,20 +120,52 @@ openbao:
   prometheusRules:
     enabled: true
     rules:
-      - alert: vault-HighResponseTime
+      - alert: openBao-HighResponseTime
         annotations:
-          message: The response time of Vault is over 500ms on average over the last 5 minutes.
+          message: The response time of OpenBao is over 500ms on average over the last 5 minutes.
         expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500
         for: 5m
         labels:
           severity: warning
-      - alert: vault-HighResponseTime
+      - alert: openBao-HighResponseTime
         annotations:
-          message: The response time of Vault is over 1s on average over the last 5 minutes.
+          message: The response time of OpenBao is over 1s on average over the last 5 minutes.
         expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000
         for: 5m
         labels:
           severity: critical
+      - alert: openBao-Sealed
+        expr: vault_core_unsealed == 0
+        for: 1m
+        labels:
+          severity: critical
+        annotations:
+          summary: OpenBao sealed (instance {{ $labels.instance }})
+          description: "OpenBao instance is sealed on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+      - alert: OpenBao-TooManyPendingTokens
+        expr: avg(vault_token_create_count - vault_token_store_count) > 0
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: OpenBao too many pending tokens (instance {{ $labels.instance }})
+          description: "Too many pending tokens on {{ $labels.instance }}: {{ $value }} tokens created but not yet stored.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+      - alert: OpenBao-TooManyInfinityTokens
+        expr: vault_token_count_by_ttl{creation_ttl="+Inf"} > 3
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: OpenBao too many infinity tokens (instance {{ $labels.instance }})
+          description: "Too many non-expiring tokens on {{ $labels.instance }}: {{ $value }} tokens with infinite TTL.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+      - alert: OpenBao-ClusterHealth
+        expr: sum(vault_core_active) / count(vault_core_active) <= 0.5 and count(vault_core_active) > 0
+        for: 0m
+        labels:
+          severity: critical
+        annotations:
+          summary: OpenBao cluster health (instance {{ $labels.instance }})
+          description: "OpenBao cluster is not healthy: only {{ $value | humanizePercentage }} of nodes are active.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
   snapshotAgent:
     enabled: true
     schedule: 0 4 * * *
```
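Reading the new `openBao-ClusterHealth` expression: with three nodes and one active, `sum(vault_core_active) / count(vault_core_active)` is 1/3 ≈ 0.33, which is ≤ 0.5, so the alert fires immediately (`for: 0m`); the `count(...) > 0` guard keeps it from firing when no `vault_core_active` series exist at all. Note the rules keep querying `vault_`-prefixed metric names even though the alerts were renamed for OpenBao.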
```diff
@@ -48,4 +48,4 @@ dependencies:
     repository: oci://harbor.alexlebens.net/helm-charts
 icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/paperless-ngx.png
 # renovate: datasource=github-releases depName=paperless-ngx/paperless-ngx
-appVersion: 2.20.14
+appVersion: 2.20.15
```
```diff
@@ -8,7 +8,7 @@ paperless-ngx:
     main:
       image:
         repository: ghcr.io/paperless-ngx/paperless-ngx
-        tag: 2.20.14@sha256:b89f83345532cfba72690185257eb6c4f92fc2a782332a42abe19c07b7a6595f
+        tag: 2.20.15@sha256:6c86cad803970ea782683a8e80e7403444c5bf3cf70de63b4d3c8e87500db92f
       env:
         - name: PAPERLESS_REDIS
           value: redis://paperless-ngx-valkey.paperless-ngx:6379
```
```diff
@@ -20,4 +20,4 @@ dependencies:
     version: 4.6.2
 icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/plex.png
 # renovate: datasource=github-releases depName=linuxserver/docker-plex
-appVersion: 1.43.1.10611-1e34174b1-ls302
+appVersion: 1.43.1.10611-1e34174b1-ls303
```
```diff
@@ -22,7 +22,7 @@ plex:
     main:
       image:
         repository: ghcr.io/linuxserver/plex
-        tag: 1.43.1.10611-1e34174b1-ls302@sha256:e5c7c283b242966416a4bed2d666acf6f3fb8f957c704be8333f8dc987364825
+        tag: 1.43.1.10611-1e34174b1-ls303@sha256:b785bdd60e781662f16e0526a6b54c07856739df95ab558a674a3c084dbde423
       env:
         - name: TZ
           value: America/Chicago
```
clusters/cl01tl/helm/rybbit/templates/prometheus-rule.yaml (new file, +156 lines)

```diff
@@ -0,0 +1,156 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: clickhouse
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: clickhouse
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: EmbeddedExporter
+      rules:
+        - alert: ClickHouseNodeDown
+          expr: up{job="clickhouse"} == 0
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse node down (instance {{ `{{ $labels.instance }}` }})
+            description: "No metrics received from ClickHouse exporter for over 2 minutes.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseMemoryUsageCritical
+          expr: ClickHouseAsyncMetrics_CGroupMemoryUsed / ClickHouseAsyncMetrics_CGroupMemoryTotal * 100 > 90 and ClickHouseAsyncMetrics_CGroupMemoryTotal > 0
+          for: 5m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse Memory Usage Critical (instance {{ `{{ $labels.instance }}` }})
+            description: "Memory usage is critically high, over 90%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseMemoryUsageWarning
+          expr: ClickHouseAsyncMetrics_CGroupMemoryUsed / ClickHouseAsyncMetrics_CGroupMemoryTotal * 100 > 80 and ClickHouseAsyncMetrics_CGroupMemoryTotal > 0
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse Memory Usage Warning (instance {{ `{{ $labels.instance }}` }})
+            description: "Memory usage is over 80%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseDiskSpaceLowOnDefault
+          expr: ClickHouseAsyncMetrics_DiskAvailable_default / (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) * 100 < 20 and (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse Disk Space Low on Default (instance {{ `{{ $labels.instance }}` }})
+            description: "Disk space on default is below 20%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseDiskSpaceCriticalOnDefault
+          expr: ClickHouseAsyncMetrics_DiskAvailable_default / (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) * 100 < 10 and (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) > 0
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse Disk Space Critical on Default (instance {{ `{{ $labels.instance }}` }})
+            description: "Disk space on default disk is critically low, below 10%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseDiskSpaceLowOnBackups
+          expr: ClickHouseAsyncMetrics_DiskAvailable_backups / (ClickHouseAsyncMetrics_DiskAvailable_backups + ClickHouseAsyncMetrics_DiskUsed_backups) * 100 < 20 and (ClickHouseAsyncMetrics_DiskAvailable_backups + ClickHouseAsyncMetrics_DiskUsed_backups) > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse Disk Space Low on Backups (instance {{ `{{ $labels.instance }}` }})
+            description: "Disk space on backups is below 20%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseReplicaErrors
+          expr: ClickHouseErrorMetric_ALL_REPLICAS_ARE_STALE == 1 or ClickHouseErrorMetric_ALL_REPLICAS_LOST == 1
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse Replica Errors (instance {{ `{{ $labels.instance }}` }})
+            description: "Critical replica errors detected, either all replicas are stale or lost.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseNoAvailableReplicas
+          expr: ClickHouseErrorMetric_NO_AVAILABLE_REPLICA == 1
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse No Available Replicas (instance {{ `{{ $labels.instance }}` }})
+            description: "No available replicas in ClickHouse.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseNoLiveReplicas
+          expr: ClickHouseErrorMetric_TOO_FEW_LIVE_REPLICAS == 1
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse No Live Replicas (instance {{ `{{ $labels.instance }}` }})
+            description: "There are too few live replicas available, risking data loss and service disruption.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseHighTCPConnections
+          expr: ClickHouseMetrics_TCPConnection > 400
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse High TCP Connections (instance {{ `{{ $labels.instance }}` }})
+            description: "High number of TCP connections, indicating heavy client or inter-cluster communication.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseInterserverConnectionIssues
+          expr: ClickHouseMetrics_InterserverConnection > 50
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse Interserver Connection Issues (instance {{ `{{ $labels.instance }}` }})
+            description: "High number of interserver connections may indicate replication or distributed query handling issues.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseZooKeeperConnectionIssues
+          expr: ClickHouseMetrics_ZooKeeperSession != 1
+          for: 3m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse ZooKeeper Connection Issues (instance {{ `{{ $labels.instance }}` }})
+            description: "ClickHouse is experiencing issues with ZooKeeper connections, which may affect cluster state and coordination.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseAuthenticationFailures
+          expr: increase(ClickHouseErrorMetric_AUTHENTICATION_FAILED[5m]) > 3
+          for: 0m
+          labels:
+            severity: info
+          annotations:
+            summary: ClickHouse Authentication Failures (instance {{ `{{ $labels.instance }}` }})
+            description: "Authentication failures detected, indicating potential security issues or misconfiguration.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseAccessDeniedErrors
+          expr: increase(ClickHouseErrorMetric_RESOURCE_ACCESS_DENIED[5m]) > 3
+          for: 0m
+          labels:
+            severity: info
+          annotations:
+            summary: ClickHouse Access Denied Errors (instance {{ `{{ $labels.instance }}` }})
+            description: "Access denied errors have been logged, which could indicate permission issues or unauthorized access attempts.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseRejectedInsertQueries
+          expr: increase(ClickHouseProfileEvents_RejectedInserts[1m]) > 2
+          for: 1m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse rejected insert queries (instance {{ `{{ $labels.instance }}` }})
+            description: "INSERTs rejected due to too many active data parts. Reduce insert frequency.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseDelayedInsertQueries
+          expr: increase(ClickHouseProfileEvents_DelayedInserts[5m]) > 10
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: ClickHouse delayed insert queries (instance {{ `{{ $labels.instance }}` }})
+            description: "INSERTs delayed due to high number of active parts.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseZookeeperHardwareException
+          expr: increase(ClickHouseProfileEvents_ZooKeeperHardwareExceptions[1m]) > 0
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse zookeeper hardware exception (instance {{ `{{ $labels.instance }}` }})
+            description: "Zookeeper hardware exception: network issues communicating with ZooKeeper\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ClickHouseDistributedRejectedInserts
+          expr: increase(ClickHouseProfileEvents_DistributedRejectedInserts[5m]) > 3
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: ClickHouse distributed rejected inserts (instance {{ `{{ $labels.instance }}` }})
+            description: "INSERTs into Distributed tables rejected due to pending bytes limit.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
```diff
@@ -150,19 +150,20 @@ rybbit:
     data:
       network.xml: |
         <clickhouse>
          <listen_host>0.0.0.0</listen_host>
+          <http_port>8123</http_port>
         </clickhouse>
       enable_json.xml: |
         <clickhouse>
           <settings>
             <enable_json_type>1</enable_json_type>
           </settings>
         </clickhouse>
       logging_rules.xml: |
         <clickhouse>
           <logger>
             <level>warning</level>
             <console>true</console>
           </logger>
           <query_thread_log remove="remove"/>
           <query_log remove="remove"/>
```
```diff
@@ -185,6 +186,17 @@ rybbit:
             </default>
           </profiles>
         </clickhouse>
+      metrics.xml: |
+        <clickhouse>
+          <prometheus>
+            <endpoint>/metrics</endpoint>
+            <port>9363</port>
+            <metrics>true</metrics>
+            <events>true</events>
+            <asynchronous_metrics>true</asynchronous_metrics>
+            <errors>true</errors>
+          </prometheus>
+        </clickhouse>
     service:
       backend:
         controller: backend
```
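Once this config rolls out, the embedded exporter can be spot-checked from outside the pod; a sketch, with the service name and namespace assumed from the values above:

```sh
# Forward the new metrics port and confirm ClickHouse answers on /metrics.
kubectl -n rybbit port-forward svc/rybbit-clickhouse 9363:9363 &
curl -s http://localhost:9363/metrics | grep -m 3 '^ClickHouse'
```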
```diff
@@ -204,6 +216,21 @@ rybbit:
         http:
           port: 8123
           targetPort: 8123
+        metrics:
+          port: 9363
+          targetPort: 9363
+    serviceMonitor:
+      main:
+        selector:
+          matchLabels:
+            app.kubernetes.io/name: rybbit-clickhouse
+            app.kubernetes.io/instance: rybbit-clickhouse
+        serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
+        endpoints:
+          - port: metrics
+            interval: 30s
+            scrapeTimeout: 10s
+            path: /metrics
     persistence:
       clickhouse:
         forceRename: clickhouse-data
```
```diff
@@ -238,6 +265,10 @@ rybbit:
           readOnly: true
           mountPropagation: None
           subPath: user_logging.xml
+        - path: /etc/clickhouse-server/config.d/metrics.xml
+          readOnly: true
+          mountPropagation: None
+          subPath: metrics.xml
 postgres-18-cluster:
   mode: recovery
   recovery:
```
```diff
@@ -3,12 +3,12 @@ secrets-store-csi-driver:
   enabled: true
   image:
     repository: registry.k8s.io/csi-secrets-store/driver
-    tag: v1.5.6@sha256:6df2b3b3817136d2ade3d53306dbbd98385c1c01e8b3c373192c0e5b8d183f7b
+    tag: v1.6.0@sha256:110344819630bfd41e0c6d3f215d325ad1a4d5d5b1d298f8af7d0edf4df64a4e
   crds:
     enabled: true
     image:
       repository: registry.k8s.io/csi-secrets-store/driver-crds
-      tag: v1.5.6@sha256:d40d9212beb62ee0f9f09b75d024ed807816879f38e75eca309497c3df89568c
+      tag: v1.6.0@sha256:2419b318a1c17bd741686bf1994cd37cee7162039c019435b8f534f2846fe488
   driver:
     resources:
       limits:
```
```diff
@@ -10,7 +10,7 @@ site-documentation:
     main:
       image:
         repository: harbor.alexlebens.net/images/site-documentation
-        tag: 0.27.1@sha256:a9e8659827375e7ee65ea8bc8550f4c0604316b48f39da7fa255fa9f3b5a17d6
+        tag: 0.28.0@sha256:dabb2c9a8c306a01ccf1d85e797f6a5cc81d8d3b5db8d28ab1b5969f1b56cf74
       resources:
         requests:
           cpu: 10m
```
```diff
@@ -10,7 +10,7 @@ site-profile:
     main:
       image:
         repository: harbor.alexlebens.net/images/site-profile
-        tag: 3.18.6@sha256:6aacdb7270d21b02d85cd593999014c91614e70c8f6f84774e532f9141237a6c
+        tag: 3.19.1@sha256:bf8f7f065867c605fe42955f12aaec68c5d1e667a3325bb30ad6d028b523bcd5
       resources:
         requests:
           cpu: 10m
```
clusters/cl01tl/helm/stalwart/templates/prometheus-rule.yaml (new file, +169 lines)

```diff
@@ -0,0 +1,169 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: elasticsearch
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: elasticsearch
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: ElasticsearchExporter
+      rules:
+        - alert: ElasticsearchHeapUsageTooHigh
+          expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Heap Usage Too High (instance {{ `{{ $labels.instance }}` }})
+            description: "The heap usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchHeapUsageWarning
+          expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch Heap Usage warning (instance {{ `{{ $labels.instance }}` }})
+            description: "The heap usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchDiskOutOfSpace
+          expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch disk out of space (instance {{ `{{ $labels.instance }}` }})
+            description: "The disk usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchDiskSpaceLow
+          expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch disk space low (instance {{ `{{ $labels.instance }}` }})
+            description: "The disk usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchClusterRed
+          expr: elasticsearch_cluster_health_status{color="red"} == 1
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Cluster Red (instance {{ `{{ $labels.instance }}` }})
+            description: "Elastic Cluster Red status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchClusterYellow
+          expr: elasticsearch_cluster_health_status{color="yellow"} == 1
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch Cluster Yellow (instance {{ `{{ $labels.instance }}` }})
+            description: "Elastic Cluster Yellow status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # 1m delay allows a restart without triggering an alert.
+        - alert: ElasticsearchHealthyNodes
+          expr: elasticsearch_cluster_health_number_of_nodes < 3
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Healthy Nodes (instance {{ `{{ $labels.instance }}` }})
+            description: "Missing node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # 1m delay allows a restart without triggering an alert.
+        - alert: ElasticsearchHealthyDataNodes
+          expr: elasticsearch_cluster_health_number_of_data_nodes < 3
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Healthy Data Nodes (instance {{ `{{ $labels.instance }}` }})
+            description: "Missing data node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchRelocatingShards
+          expr: elasticsearch_cluster_health_relocating_shards > 0
+          for: 0m
+          labels:
+            severity: info
+          annotations:
+            summary: Elasticsearch relocating shards (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch is relocating shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchRelocatingShardsTooLong
+          expr: elasticsearch_cluster_health_relocating_shards > 0
+          for: 15m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch relocating shards too long (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchInitializingShards
+          expr: elasticsearch_cluster_health_initializing_shards > 0
+          for: 0m
+          labels:
+            severity: info
+          annotations:
+            summary: Elasticsearch initializing shards (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch is initializing shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchInitializingShardsTooLong
+          expr: elasticsearch_cluster_health_initializing_shards > 0
+          for: 15m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch initializing shards too long (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchUnassignedShards
+          expr: elasticsearch_cluster_health_unassigned_shards > 0
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch unassigned shards (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has unassigned shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchPendingTasks
+          expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
+          for: 15m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch pending tasks (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchNoNewDocuments
+          expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch no new documents (instance {{ `{{ $labels.instance }}` }})
+            description: "No new documents for 10 min!\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # Threshold of 10ms (0.01s) per indexing operation is a rough default. Adjust based on your document size and cluster performance.
+        - alert: ElasticsearchHighIndexingLatency
+          expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
+          for: 10m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Indexing Latency (instance {{ `{{ $labels.instance }}` }})
+            description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # Threshold of 10000 ops/s is a rough default. Adjust based on your cluster capacity and expected workload.
+        - alert: ElasticsearchHighIndexingRate
+          expr: sum(rate(elasticsearch_indices_indexing_index_total[1m])) > 10000
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Indexing Rate (instance {{ `{{ $labels.instance }}` }})
+            description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # Threshold of 100 queries/s is very low for most production clusters. Adjust based on your expected query volume.
+        - alert: ElasticsearchHighQueryRate
+          expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Query Rate (instance {{ `{{ $labels.instance }}` }})
+            description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchHighQueryLatency
+          expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Query Latency (instance {{ `{{ $labels.instance }}` }})
+            description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
```
```diff
@@ -1,6 +1,7 @@
 stalwart:
   controllers:
     main:
+      forceRename: stalwart
       type: deployment
       replicas: 1
       strategy: Recreate
```
```diff
@@ -13,9 +14,26 @@ stalwart:
         requests:
           cpu: 10m
           memory: 100Mi
+    metrics:
+      type: deployment
+      replicas: 1
+      strategy: Recreate
+      containers:
+        main:
+          image:
+            repository: quay.io/prometheuscommunity/elasticsearch-exporter
+            tag: v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
+          args:
+            - '--es.uri=https://elasticsearch-stalwart-es-http.tubearchivist:9200'
+            - '--es.ssl-skip-verify'
+          resources:
+            requests:
+              cpu: 1m
+              memory: 10Mi
   service:
     main:
       controller: main
+      forceRename: stalwart
       ports:
         http:
           port: 80
```
@@ -32,6 +50,24 @@ stalwart:
|
|||||||
imaps:
|
imaps:
|
||||||
port: 993
|
port: 993
|
||||||
targetPort: 993
|
targetPort: 993
|
||||||
|
metrics:
|
||||||
|
controller: metrics
|
||||||
|
ports:
|
||||||
|
metrics:
|
||||||
|
port: 9114
|
||||||
|
targetPort: 9114
|
||||||
|
serviceMonitor:
|
||||||
|
main:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/name: stalwart-metrics
|
||||||
|
app.kubernetes.io/instance: stalwart-metrics
|
||||||
|
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
|
||||||
|
endpoints:
|
||||||
|
- port: metrics
|
||||||
|
interval: 30s
|
||||||
|
scrapeTimeout: 10s
|
||||||
|
path: /metrics
|
||||||
route:
|
route:
|
||||||
main:
|
main:
|
||||||
kind: HTTPRoute
|
kind: HTTPRoute
|
||||||
|
|||||||
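The same exporter-sidecar pattern is repeated for tubearchivist further down: a second bjw-s app-template controller runs prometheus-community's elasticsearch-exporter, a metrics Service exposes its default port 9114, and a ServiceMonitor scrapes it. As a sanity check on what the values above expand to, here is a rough hand-written approximation of the resulting exporter Deployment; the names and labels are assumptions about the chart's rendering, not actual chart output.

# Hand-written approximation (assumed names/labels, not chart output)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stalwart-metrics
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: stalwart-metrics
      app.kubernetes.io/instance: stalwart-metrics
  template:
    metadata:
      labels:
        app.kubernetes.io/name: stalwart-metrics
        app.kubernetes.io/instance: stalwart-metrics
    spec:
      containers:
        - name: main
          image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.10.0
          args:
            # Scrape target and TLS settings copied from the values above.
            - '--es.uri=https://elasticsearch-stalwart-es-http.tubearchivist:9200'
            - '--es.ssl-skip-verify'
          ports:
            - name: metrics
              containerPort: 9114  # elasticsearch-exporter's default listen port
          resources:
            requests:
              cpu: 1m
              memory: 10Mi

The ServiceMonitor then selects the companion metrics Service by those same app.kubernetes.io labels and scrapes port 9114 every 30s.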
@@ -0,0 +1,169 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: elasticsearch
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubernetes.io/name: elasticsearch
+    {{- include "custom.labels" . | nindent 4 }}
+spec:
+  groups:
+    - name: ElasticsearchExporter
+      rules:
+        - alert: ElasticsearchHeapUsageTooHigh
+          expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Heap Usage Too High (instance {{ `{{ $labels.instance }}` }})
+            description: "The heap usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchHeapUsageWarning
+          expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch Heap Usage warning (instance {{ `{{ $labels.instance }}` }})
+            description: "The heap usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchDiskOutOfSpace
+          expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch disk out of space (instance {{ `{{ $labels.instance }}` }})
+            description: "The disk usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchDiskSpaceLow
+          expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
+          for: 2m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch disk space low (instance {{ `{{ $labels.instance }}` }})
+            description: "The disk usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchClusterRed
+          expr: elasticsearch_cluster_health_status{color="red"} == 1
+          for: 0m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Cluster Red (instance {{ `{{ $labels.instance }}` }})
+            description: "Elastic Cluster Red status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchClusterYellow
+          expr: elasticsearch_cluster_health_status{color="yellow"} == 1
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch Cluster Yellow (instance {{ `{{ $labels.instance }}` }})
+            description: "Elastic Cluster Yellow status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # 1m delay allows a restart without triggering an alert.
+        - alert: ElasticsearchHealthyNodes
+          expr: elasticsearch_cluster_health_number_of_nodes < 3
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Healthy Nodes (instance {{ `{{ $labels.instance }}` }})
+            description: "Missing node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # 1m delay allows a restart without triggering an alert.
+        - alert: ElasticsearchHealthyDataNodes
+          expr: elasticsearch_cluster_health_number_of_data_nodes < 3
+          for: 1m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch Healthy Data Nodes (instance {{ `{{ $labels.instance }}` }})
+            description: "Missing data node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchRelocatingShards
+          expr: elasticsearch_cluster_health_relocating_shards > 0
+          for: 0m
+          labels:
+            severity: info
+          annotations:
+            summary: Elasticsearch relocating shards (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch is relocating shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchRelocatingShardsTooLong
+          expr: elasticsearch_cluster_health_relocating_shards > 0
+          for: 15m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch relocating shards too long (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchInitializingShards
+          expr: elasticsearch_cluster_health_initializing_shards > 0
+          for: 0m
+          labels:
+            severity: info
+          annotations:
+            summary: Elasticsearch initializing shards (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch is initializing shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchInitializingShardsTooLong
+          expr: elasticsearch_cluster_health_initializing_shards > 0
+          for: 15m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch initializing shards too long (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchUnassignedShards
+          expr: elasticsearch_cluster_health_unassigned_shards > 0
+          for: 2m
+          labels:
+            severity: critical
+          annotations:
+            summary: Elasticsearch unassigned shards (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has unassigned shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchPendingTasks
+          expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
+          for: 15m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch pending tasks (instance {{ `{{ $labels.instance }}` }})
+            description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchNoNewDocuments
+          expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
+          for: 0m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch no new documents (instance {{ `{{ $labels.instance }}` }})
+            description: "No new documents for 10 min!\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # Threshold of 10ms (0.01s) per indexing operation is a rough default. Adjust based on your document size and cluster performance.
+        - alert: ElasticsearchHighIndexingLatency
+          expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
+          for: 10m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Indexing Latency (instance {{ `{{ $labels.instance }}` }})
+            description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # Threshold of 10000 ops/s is a rough default. Adjust based on your cluster capacity and expected workload.
+        - alert: ElasticsearchHighIndexingRate
+          expr: sum(rate(elasticsearch_indices_indexing_index_total[1m])) > 10000
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Indexing Rate (instance {{ `{{ $labels.instance }}` }})
+            description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        # Threshold of 100 queries/s is very low for most production clusters. Adjust based on your expected query volume.
+        - alert: ElasticsearchHighQueryRate
+          expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Query Rate (instance {{ `{{ $labels.instance }}` }})
+            description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
+        - alert: ElasticsearchHighQueryLatency
+          expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
+          for: 5m
+          labels:
+            severity: warning
+          annotations:
+            summary: Elasticsearch High Query Latency (instance {{ `{{ $labels.instance }}` }})
+            description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
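The thresholds in this rule file are hard-coded. If different clusters eventually need different limits, one option would be to template them through the chart's values; a minimal sketch, assuming a hypothetical elasticsearch.alerts values key that does not exist in this chart today:

# values.yaml (hypothetical keys)
elasticsearch:
  alerts:
    queryLatencySeconds: 1
    indexingLatencySeconds: 0.01

# templates/prometheus-rule.yaml (excerpt, threshold pulled from values)
- alert: ElasticsearchHighQueryLatency
  expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > {{ .Values.elasticsearch.alerts.queryLatencySeconds }} and rate(elasticsearch_indices_search_query_total[1m]) > 0
  for: 5m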
@@ -1,6 +1,7 @@
 tubearchivist:
   controllers:
     main:
+      forceRename: tubearchivist
       type: deployment
       replicas: 1
       strategy: Recreate
@@ -96,13 +97,48 @@ tubearchivist:
              devic.es/tun: "1"
            requests:
              devic.es/tun: "1"
+    metrics:
+      type: deployment
+      replicas: 1
+      strategy: Recreate
+      containers:
+        main:
+          image:
+            repository: quay.io/prometheuscommunity/elasticsearch-exporter
+            tag: v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
+          args:
+            - '--es.uri=https://elasticsearch-tubearchivist-es-http.tubearchivist:9200'
+            - '--es.ssl-skip-verify'
+          resources:
+            requests:
+              cpu: 1m
+              memory: 10Mi
   service:
     main:
       controller: main
+      forceRename: tubearchivist
       ports:
         http:
           port: 80
           targetPort: 24000
+    metrics:
+      controller: metrics
+      ports:
+        metrics:
+          port: 9114
+          targetPort: 9114
+  serviceMonitor:
+    main:
+      selector:
+        matchLabels:
+          app.kubernetes.io/name: tubearchivist-metrics
+          app.kubernetes.io/instance: tubearchivist-metrics
+      serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
+      endpoints:
+        - port: metrics
+          interval: 30s
+          scrapeTimeout: 10s
+          path: /metrics
   route:
     main:
       kind: HTTPRoute