feat: add rules
Some checks failed
lint-test-helm / lint-helm (pull_request) Failing after 3m35s
lint-test-helm / validate-kubeconform (pull_request) Has been skipped

2026-04-27 13:39:08 -05:00
parent 988fed6179
commit 87f21b0e03
12 changed files with 887 additions and 10 deletions

View File

@@ -0,0 +1,124 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: haproxy
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: haproxy
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: HAProxyHighHTTP4xxErrorRateBackend
expr: ((sum by (proxy) (rate(haproxy_server_http_responses_total{code="4xx"}[1m])) / sum by (proxy) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (proxy) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 4xx error rate backend (instance {{ `{{ $labels.instance }}` }})
description: "Too many HTTP requests with status 4xx (> 5%) on backend {{ `{{ $labels.proxy }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyHighHTTP5xxErrorRateBackend
expr: ((sum by (proxy) (rate(haproxy_server_http_responses_total{code="5xx"}[1m])) / sum by (proxy) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (proxy) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 5xx error rate backend (instance {{ `{{ $labels.instance }}` }})
description: "Too many HTTP requests with status 5xx (> 5%) on backend {{ `{{ $labels.proxy }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyHighHTTP4xxErrorRateServer
expr: ((sum by (server) (rate(haproxy_server_http_responses_total{code="4xx"}[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 4xx error rate server (instance {{ `{{ $labels.instance }}` }})
description: "Too many HTTP requests with status 4xx (> 5%) on server {{ `{{ $labels.server }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyHighHTTP5xxErrorRateServer
expr: ((sum by (server) (rate(haproxy_server_http_responses_total{code="5xx"}[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 5xx error rate server (instance {{ `{{ $labels.instance }}` }})
description: "Too many HTTP requests with status 5xx (> 5%) on server {{ `{{ $labels.server }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyServerResponseErrors
expr: (sum by (server) (rate(haproxy_server_response_errors_total[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100 > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy server response errors (instance {{ `{{ $labels.instance }}` }})
description: "Too many response errors to {{ `{{ $labels.server }}` }} server (> 5%).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyBackendConnectionErrors
expr: (sum by (proxy) (rate(haproxy_backend_connection_errors_total[1m]))) > 100
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy backend connection errors (instance {{ `{{ $labels.instance }}` }})
description: "Too many connection errors to {{ `{{ $labels.proxy }}` }} backend (> 100 req/s). Request throughput may be too high.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyServerConnectionErrors
expr: (sum by (proxy) (rate(haproxy_server_connection_errors_total[1m]))) > 100
for: 0m
labels:
severity: critical
annotations:
summary: HAProxy server connection errors (instance {{ `{{ $labels.instance }}` }})
description: "Too many connection errors to {{ `{{ $labels.proxy }}` }} (> 100 req/s). Request throughput may be too high.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyBackendMaxActiveSession
expr: (haproxy_backend_current_sessions / haproxy_backend_limit_sessions * 100) > 80 and haproxy_backend_limit_sessions > 0
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy backend max active session > 80% (instance {{ `{{ $labels.instance }}` }})
description: "Session limit from backend {{ `{{ $labels.proxy }}` }} reached 80% of limit - {{ `{{ $value | printf \"%.2f\"}}` }}%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyPendingRequests
expr: sum by (proxy) (haproxy_backend_current_queue) > 0
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy pending requests (instance {{ `{{ $labels.instance }}` }})
description: "Some HAProxy requests are pending on {{ `{{ $labels.proxy }}` }} - {{ `{{ $value | printf \"%.2f\"}}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyHTTPSlowingDown
expr: avg by (instance, proxy) (haproxy_backend_max_total_time_seconds) > 1
for: 1m
labels:
severity: warning
annotations:
summary: HAProxy HTTP slowing down (instance {{ `{{ $labels.instance }}` }})
description: "HAProxy backend max total time is above 1s on {{ `{{ $labels.proxy }}` }} - {{ `{{ $value | printf \"%.2f\"}}` }}s\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyRetryHigh
expr: sum by (proxy) (rate(haproxy_backend_retry_warnings_total[1m])) > 10
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy retry high (instance {{ `{{ $labels.instance }}` }})
description: "High rate of retry on {{ `{{ $labels.proxy }}` }} - {{ `{{ $value | printf \"%.2f\"}}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyHasNoAliveBackends
expr: haproxy_backend_active_servers + haproxy_backend_backup_servers == 0
for: 0m
labels:
severity: critical
annotations:
summary: HAProxy has no alive backends (instance {{ `{{ $labels.instance }}` }})
description: "HAProxy has no alive active or backup backends for {{ `{{ $labels.proxy }}` }}\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyFrontendSecurityBlockedRequests
expr: sum by (proxy) (rate(haproxy_frontend_denied_connections_total[2m])) > 10
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy frontend security blocked requests (instance {{ `{{ $labels.instance }}` }})
description: "HAProxy is blocking requests for security reason\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: HAProxyServerHealthcheckFailure
expr: increase(haproxy_server_check_failures_total[1m]) > 2
for: 0m
labels:
severity: warning
annotations:
summary: HAProxy server healthcheck failure (instance {{ `{{ $labels.instance }}` }})
description: "Some server healthcheck are failing on {{ `{{ $labels.server }}` }} ({{ `{{ $value }}` }} in the last 1m)\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -0,0 +1,44 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: cert-manager
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: cert-manager
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: Cert-ManagerAbsent
expr: absent(up{job="cert-manager"})
for: 10m
labels:
severity: critical
annotations:
summary: Cert-Manager absent (instance {{ `{{ $labels.instance }}` }})
description: "Cert-Manager has disappeared from Prometheus service discovery. New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: Cert-ManagerCertificateExpiringSoon
expr: avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600)
for: 1h
labels:
severity: warning
annotations:
summary: Cert-Manager certificate expiring soon (instance {{ `{{ $labels.instance }}` }})
description: "The certificate {{ `{{ $labels.name }}` }} is expiring in less than 21 days.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: Cert-ManagerCertificateNotReady
expr: max by (name, exported_namespace, namespace, condition) (certmanager_certificate_ready_status{condition!="True"} == 1)
for: 10m
labels:
severity: critical
annotations:
summary: Cert-Manager certificate not ready (instance {{ `{{ $labels.instance }}` }})
description: "The certificate {{ `{{ $labels.name }}` }} in namespace {{ `{{ $labels.exported_namespace }}` }} is not ready to serve traffic.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: Cert-ManagerHittingACMERateLimits
expr: sum by (host) (rate(certmanager_acme_client_request_count{status="429"}[5m])) > 0
for: 5m
labels:
severity: critical
annotations:
summary: Cert-Manager hitting ACME rate limits (instance {{ `{{ $labels.instance }}` }})
description: "Cert-Manager is being rate-limited by the ACME provider. Certificate issuance and renewal may be blocked for up to a week.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -0,0 +1,28 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: meilisearch
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: meilisearch
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: MeilisearchIndexIsEmpty
expr: meilisearch_index_docs_count == 0
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch index is empty (instance {{ `{{ $labels.instance }}` }})
description: "Meilisearch index {{ `{{ $labels.index }}` }} has zero documents\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: MeilisearchHttpResponseTime
expr: meilisearch_http_response_time_seconds > 0.5
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch HTTP response time (instance {{ `{{ $labels.instance }}` }})
description: "Meilisearch HTTP response time is too high (> 0.5s)\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -0,0 +1,28 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: meilisearch
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: meilisearch
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: MeilisearchIndexIsEmpty
expr: meilisearch_index_docs_count == 0
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch index is empty (instance {{ `{{ $labels.instance }}` }})
description: "Meilisearch index {{ `{{ $labels.index }}` }} has zero documents\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: MeilisearchHttpResponseTime
expr: meilisearch_http_response_time_seconds > 0.5
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch HTTP response time (instance {{ `{{ $labels.instance }}` }})
description: "Meilisearch HTTP response time is too high (> 0.5s)\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -0,0 +1,28 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: meilisearch
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: meilisearch
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: MeilisearchIndexIsEmpty
expr: meilisearch_index_docs_count == 0
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch index is empty (instance {{ `{{ $labels.instance }}` }})
description: "Meilisearch index {{ `{{ $labels.index }}` }} has zero documents\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: MeilisearchHttpResponseTime
expr: meilisearch_http_response_time_seconds > 0.5
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch HTTP response time (instance {{ `{{ $labels.instance }}` }})
description: "Meilisearch HTTP response time is too high (> 0.5s)\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -120,20 +120,52 @@ openbao:
prometheusRules:
enabled: true
rules:
- alert: OpenBao-HighResponseTime
annotations:
message: The response time of OpenBao is over 500ms on average over the last 5 minutes.
expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500
for: 5m
labels:
severity: warning
- alert: OpenBao-HighResponseTime
annotations:
message: The response time of OpenBao is over 1s on average over the last 5 minutes.
expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000
for: 5m
labels:
severity: critical
- alert: OpenBao-Sealed
expr: vault_core_unsealed == 0
for: 1m
labels:
severity: critical
annotations:
summary: OpenBao sealed (instance {{ $labels.instance }})
description: "OpenBao instance is sealed on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: OpenBao-TooManyPendingTokens
expr: avg(vault_token_create_count - vault_token_store_count) > 0
for: 5m
labels:
severity: warning
annotations:
summary: OpenBao too many pending tokens (instance {{ $labels.instance }})
description: "Too many pending tokens on {{ $labels.instance }}: {{ $value }} tokens created but not yet stored.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: OpenBao-TooManyInfinityTokens
expr: vault_token_count_by_ttl{creation_ttl="+Inf"} > 3
for: 5m
labels:
severity: warning
annotations:
summary: OpenBao too many infinity tokens (instance {{ $labels.instance }})
description: "Too many non-expiring tokens on {{ $labels.instance }}: {{ $value }} tokens with infinite TTL.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: OpenBao-ClusterHealth
expr: sum(vault_core_active) / count(vault_core_active) <= 0.5 and count(vault_core_active) > 0
for: 0m
labels:
severity: critical
annotations:
summary: OpenBao cluster health (instance {{ $labels.instance }})
description: "OpenBao cluster is not healthy: only {{ $value | humanizePercentage }} of nodes are active.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
snapshotAgent:
enabled: true
schedule: 0 4 * * *
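Unlike the PrometheusRule templates above, this is a plain values file: Helm does not run values.yaml through the template engine, so {{ $labels.instance }} can appear unescaped here. If the downstream chart ever renders these rules through tpl, the annotations would need the same backtick escaping as the templates, e.g.:

summary: OpenBao sealed (instance {{ `{{ $labels.instance }}` }})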

View File

@@ -0,0 +1,156 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: clickhouse
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: clickhouse
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: ClickHouseNodeDown
expr: up{job="clickhouse"} == 0
for: 2m
labels:
severity: critical
annotations:
summary: ClickHouse node down (instance {{ `{{ $labels.instance }}` }})
description: "No metrics received from ClickHouse exporter for over 2 minutes.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseMemoryUsageCritical
expr: ClickHouseAsyncMetrics_CGroupMemoryUsed / ClickHouseAsyncMetrics_CGroupMemoryTotal * 100 > 90 and ClickHouseAsyncMetrics_CGroupMemoryTotal > 0
for: 5m
labels:
severity: critical
annotations:
summary: ClickHouse Memory Usage Critical (instance {{ `{{ $labels.instance }}` }})
description: "Memory usage is critically high, over 90%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseMemoryUsageWarning
expr: ClickHouseAsyncMetrics_CGroupMemoryUsed / ClickHouseAsyncMetrics_CGroupMemoryTotal * 100 > 80 and ClickHouseAsyncMetrics_CGroupMemoryTotal > 0
for: 5m
labels:
severity: warning
annotations:
summary: ClickHouse Memory Usage Warning (instance {{ `{{ $labels.instance }}` }})
description: "Memory usage is over 80%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseDiskSpaceLowOnDefault
expr: ClickHouseAsyncMetrics_DiskAvailable_default / (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) * 100 < 20 and (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) > 0
for: 2m
labels:
severity: warning
annotations:
summary: ClickHouse Disk Space Low on Default (instance {{ `{{ $labels.instance }}` }})
description: "Disk space on default is below 20%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseDiskSpaceCriticalOnDefault
expr: ClickHouseAsyncMetrics_DiskAvailable_default / (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) * 100 < 10 and (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) > 0
for: 2m
labels:
severity: critical
annotations:
summary: ClickHouse Disk Space Critical on Default (instance {{ `{{ $labels.instance }}` }})
description: "Disk space on default disk is critically low, below 10%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseDiskSpaceLowOnBackups
expr: ClickHouseAsyncMetrics_DiskAvailable_backups / (ClickHouseAsyncMetrics_DiskAvailable_backups + ClickHouseAsyncMetrics_DiskUsed_backups) * 100 < 20 and (ClickHouseAsyncMetrics_DiskAvailable_backups + ClickHouseAsyncMetrics_DiskUsed_backups) > 0
for: 2m
labels:
severity: warning
annotations:
summary: ClickHouse Disk Space Low on Backups (instance {{ `{{ $labels.instance }}` }})
description: "Disk space on backups is below 20%.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseReplicaErrors
expr: ClickHouseErrorMetric_ALL_REPLICAS_ARE_STALE == 1 or ClickHouseErrorMetric_ALL_REPLICAS_LOST == 1
for: 0m
labels:
severity: critical
annotations:
summary: ClickHouse Replica Errors (instance {{ `{{ $labels.instance }}` }})
description: "Critical replica errors detected, either all replicas are stale or lost.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseNoAvailableReplicas
expr: ClickHouseErrorMetric_NO_AVAILABLE_REPLICA == 1
for: 0m
labels:
severity: critical
annotations:
summary: ClickHouse No Available Replicas (instance {{ `{{ $labels.instance }}` }})
description: "No available replicas in ClickHouse.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseNoLiveReplicas
expr: ClickHouseErrorMetric_TOO_FEW_LIVE_REPLICAS == 1
for: 0m
labels:
severity: critical
annotations:
summary: ClickHouse No Live Replicas (instance {{ `{{ $labels.instance }}` }})
description: "There are too few live replicas available, risking data loss and service disruption.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseHighTCPConnections
expr: ClickHouseMetrics_TCPConnection > 400
for: 5m
labels:
severity: warning
annotations:
summary: ClickHouse High TCP Connections (instance {{ `{{ $labels.instance }}` }})
description: "High number of TCP connections, indicating heavy client or inter-cluster communication.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseInterserverConnectionIssues
expr: ClickHouseMetrics_InterserverConnection > 50
for: 5m
labels:
severity: warning
annotations:
summary: ClickHouse Interserver Connection Issues (instance {{ `{{ $labels.instance }}` }})
description: "High number of interserver connections may indicate replication or distributed query handling issues.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseZooKeeperConnectionIssues
expr: ClickHouseMetrics_ZooKeeperSession != 1
for: 3m
labels:
severity: warning
annotations:
summary: ClickHouse ZooKeeper Connection Issues (instance {{ `{{ $labels.instance }}` }})
description: "ClickHouse is experiencing issues with ZooKeeper connections, which may affect cluster state and coordination.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseAuthenticationFailures
expr: increase(ClickHouseErrorMetric_AUTHENTICATION_FAILED[5m]) > 3
for: 0m
labels:
severity: info
annotations:
summary: ClickHouse Authentication Failures (instance {{ `{{ $labels.instance }}` }})
description: "Authentication failures detected, indicating potential security issues or misconfiguration.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseAccessDeniedErrors
expr: increase(ClickHouseErrorMetric_RESOURCE_ACCESS_DENIED[5m]) > 3
for: 0m
labels:
severity: info
annotations:
summary: ClickHouse Access Denied Errors (instance {{ `{{ $labels.instance }}` }})
description: "Access denied errors have been logged, which could indicate permission issues or unauthorized access attempts.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseRejectedInsertQueries
expr: increase(ClickHouseProfileEvents_RejectedInserts[1m]) > 2
for: 1m
labels:
severity: warning
annotations:
summary: ClickHouse rejected insert queries (instance {{ `{{ $labels.instance }}` }})
description: "INSERTs rejected due to too many active data parts. Reduce insert frequency.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseDelayedInsertQueries
expr: increase(ClickHouseProfileEvents_DelayedInserts[5m]) > 10
for: 2m
labels:
severity: warning
annotations:
summary: ClickHouse delayed insert queries (instance {{ `{{ $labels.instance }}` }})
description: "INSERTs delayed due to high number of active parts.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseZookeeperHardwareException
expr: increase(ClickHouseProfileEvents_ZooKeeperHardwareExceptions[1m]) > 0
for: 1m
labels:
severity: critical
annotations:
summary: ClickHouse zookeeper hardware exception (instance {{ `{{ $labels.instance }}` }})
description: "Zookeeper hardware exception: network issues communicating with ZooKeeper\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ClickHouseDistributedRejectedInserts
expr: increase(ClickHouseProfileEvents_DistributedRejectedInserts[5m]) > 3
for: 2m
labels:
severity: critical
annotations:
summary: ClickHouse distributed rejected inserts (instance {{ `{{ $labels.instance }}` }})
description: "INSERTs into Distributed tables rejected due to pending bytes limit.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -150,19 +150,20 @@ rybbit:
data:
network.xml: |
<clickhouse>
<listen_host>0.0.0.0</listen_host>
<http_port>8123</http_port>
</clickhouse>
enable_json.xml: |
<clickhouse>
<settings>
<enable_json_type>1</enable_json_type>
</settings>
</clickhouse>
logging_rules.xml: |
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
@@ -185,6 +186,17 @@ rybbit:
</default>
</profiles>
</clickhouse>
metrics.xml: |
<clickhouse>
<prometheus>
<endpoint>/metrics</endpoint>
<port>9363</port>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
<errors>true</errors>
</prometheus>
</clickhouse>
service:
backend:
controller: backend
@@ -204,6 +216,21 @@ rybbit:
http:
port: 8123
targetPort: 8123
metrics:
port: 9363
targetPort: 9363
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: rybbit-clickhouse
app.kubernetes.io/instance: rybbit-clickhouse
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
persistence:
clickhouse:
forceRename: clickhouse-data
@@ -238,6 +265,10 @@ rybbit:
readOnly: true
mountPropagation: None
subPath: user_logging.xml
- path: /etc/clickhouse-server/config.d/metrics.xml
readOnly: true
mountPropagation: None
subPath: metrics.xml
postgres-18-cluster:
mode: recovery
recovery:
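The metrics.xml block above enables ClickHouse's built-in Prometheus endpoint on port 9363, which is what the clickhouse PrometheusRule consumes. As I understand ClickHouse's exposition naming (worth verifying against the live endpoint), each config flag maps to one metric family used by those rules:

# <metrics>              -> ClickHouseMetrics_*        (current metrics, e.g. TCPConnection)
# <events>               -> ClickHouseProfileEvents_*  (counters, e.g. RejectedInserts)
# <asynchronous_metrics> -> ClickHouseAsyncMetrics_*   (periodic gauges, e.g. CGroupMemoryUsed)
# <errors>               -> ClickHouseErrorMetric_*    (per-error-code counters)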

View File

@@ -0,0 +1,169 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: elasticsearch
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: elasticsearch
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: ElasticsearchExporter
rules:
- alert: ElasticsearchHeapUsageTooHigh
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch Heap Usage Too High (instance {{ `{{ $labels.instance }}` }})
description: "The heap usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchHeapUsageWarning
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch Heap Usage warning (instance {{ `{{ $labels.instance }}` }})
description: "The heap usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchDiskOutOfSpace
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch disk out of space (instance {{ `{{ $labels.instance }}` }})
description: "The disk usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchDiskSpaceLow
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch disk space low (instance {{ `{{ $labels.instance }}` }})
description: "The disk usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchClusterRed
expr: elasticsearch_cluster_health_status{color="red"} == 1
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch Cluster Red (instance {{ `{{ $labels.instance }}` }})
description: "Elastic Cluster Red status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchClusterYellow
expr: elasticsearch_cluster_health_status{color="yellow"} == 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch Cluster Yellow (instance {{ `{{ $labels.instance }}` }})
description: "Elastic Cluster Yellow status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# 1m delay allows a restart without triggering an alert.
- alert: ElasticsearchHealthyNodes
expr: elasticsearch_cluster_health_number_of_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Nodes (instance {{ `{{ $labels.instance }}` }})
description: "Missing node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# 1m delay allows a restart without triggering an alert.
- alert: ElasticsearchHealthyDataNodes
expr: elasticsearch_cluster_health_number_of_data_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Data Nodes (instance {{ `{{ $labels.instance }}` }})
description: "Missing data node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchRelocatingShards
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch relocating shards (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch is relocating shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchRelocatingShardsTooLong
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch relocating shards too long (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchInitializingShards
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch initializing shards (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch is initializing shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchInitializingShardsTooLong
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch initializing shards too long (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchUnassignedShards
expr: elasticsearch_cluster_health_unassigned_shards > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch unassigned shards (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has unassigned shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchPendingTasks
expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch pending tasks (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchNoNewDocuments
expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch no new documents (instance {{ `{{ $labels.instance }}` }})
description: "No new documents for 10 min!\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# Threshold of 10ms (0.01s) per indexing operation is a rough default. Adjust based on your document size and cluster performance.
- alert: ElasticsearchHighIndexingLatency
expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
for: 10m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Latency (instance {{ `{{ $labels.instance }}` }})
description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# Threshold of 10000 ops/s is a rough default. Adjust based on your cluster capacity and expected workload.
- alert: ElasticsearchHighIndexingRate
expr: sum(rate(elasticsearch_indices_indexing_index_total[1m])) > 10000
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Rate (instance {{ `{{ $labels.instance }}` }})
description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# Threshold of 100 queries/s is very low for most production clusters. Adjust based on your expected query volume.
- alert: ElasticsearchHighQueryRate
expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Rate (instance {{ `{{ $labels.instance }}` }})
description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchHighQueryLatency
expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Latency (instance {{ `{{ $labels.instance }}` }})
description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -13,9 +13,25 @@ stalwart:
requests:
cpu: 10m
memory: 100Mi
metrics:
type: deployment
replicas: 1
strategy: Recreate
containers:
main:
image:
repository: quay.io/prometheuscommunity/elasticsearch-exporter
tag: v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
args:
- '--es.uri=https://elasticsearch-stalwart-es-http.stalwart:9200'
resources:
requests:
cpu: 1m
memory: 10Mi
service:
main:
controller: main
forceRename: stalwart
ports:
http:
port: 80
@@ -32,6 +48,24 @@ stalwart:
imaps:
port: 993
targetPort: 993
metrics:
controller: metrics
ports:
metrics:
port: 9114
targetPort: 9114
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: stalwart-metrics
app.kubernetes.io/instance: stalwart-metrics
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
route:
main:
kind: HTTPRoute
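A note on the exporter container above: by default elasticsearch-exporter only gathers node stats from the node it connects to, and ECK's -es-http service terminates TLS with a self-signed CA. A sketch of args that widens the scrape and handles the certificate (flag names from prometheus-community/elasticsearch_exporter; verify against the pinned v1.10.0 tag):

args:
  - '--es.uri=https://elasticsearch-stalwart-es-http.stalwart:9200'
  - '--es.all'              # node stats for every cluster node, not just the one dialed
  - '--es.ssl-skip-verify'  # assumption: self-signed ECK cert; mounting the CA and using --es.ca is the stricter option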

View File

@@ -0,0 +1,169 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: elasticsearch
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: elasticsearch
{{- include "custom.labels" . | nindent 4 }}
spec:
groups:
- name: ElasticsearchExporter
rules:
- alert: ElasticsearchHeapUsageTooHigh
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch Heap Usage Too High (instance {{ `{{ $labels.instance }}` }})
description: "The heap usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchHeapUsageWarning
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch Heap Usage warning (instance {{ `{{ $labels.instance }}` }})
description: "The heap usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchDiskOutOfSpace
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch disk out of space (instance {{ `{{ $labels.instance }}` }})
description: "The disk usage is over 90%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchDiskSpaceLow
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch disk space low (instance {{ `{{ $labels.instance }}` }})
description: "The disk usage is over 80%\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchClusterRed
expr: elasticsearch_cluster_health_status{color="red"} == 1
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch Cluster Red (instance {{ `{{ $labels.instance }}` }})
description: "Elastic Cluster Red status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchClusterYellow
expr: elasticsearch_cluster_health_status{color="yellow"} == 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch Cluster Yellow (instance {{ `{{ $labels.instance }}` }})
description: "Elastic Cluster Yellow status\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# 1m delay allows a restart without triggering an alert.
- alert: ElasticsearchHealthyNodes
expr: elasticsearch_cluster_health_number_of_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Nodes (instance {{ `{{ $labels.instance }}` }})
description: "Missing node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# 1m delay allows a restart without triggering an alert.
- alert: ElasticsearchHealthyDataNodes
expr: elasticsearch_cluster_health_number_of_data_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Data Nodes (instance {{ `{{ $labels.instance }}` }})
description: "Missing data node in Elasticsearch cluster\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchRelocatingShards
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch relocating shards (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch is relocating shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchRelocatingShardsTooLong
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch relocating shards too long (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchInitializingShards
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch initializing shards (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch is initializing shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchInitializingShardsTooLong
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch initializing shards too long (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchUnassignedShards
expr: elasticsearch_cluster_health_unassigned_shards > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch unassigned shards (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has unassigned shards\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchPendingTasks
expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch pending tasks (instance {{ `{{ $labels.instance }}` }})
description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchNoNewDocuments
expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch no new documents (instance {{ `{{ $labels.instance }}` }})
description: "No new documents for 10 min!\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# Threshold of 10ms (0.01s) per indexing operation is a rough default. Adjust based on your document size and cluster performance.
- alert: ElasticsearchHighIndexingLatency
expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
for: 10m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Latency (instance {{ `{{ $labels.instance }}` }})
description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# Threshold of 10000 ops/s is a rough default. Adjust based on your cluster capacity and expected workload.
- alert: ElasticsearchHighIndexingRate
expr: sum(rate(elasticsearch_indices_indexing_index_total[1m])) > 10000
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Rate (instance {{ `{{ $labels.instance }}` }})
description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
# Threshold of 100 queries/s is very low for most production clusters. Adjust based on your expected query volume.
- alert: ElasticsearchHighQueryRate
expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Rate (instance {{ `{{ $labels.instance }}` }})
description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"
- alert: ElasticsearchHighQueryLatency
expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Latency (instance {{ `{{ $labels.instance }}` }})
description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ `{{ $value }}` }}s).\n VALUE = {{ `{{ $value }}` }}\n LABELS = {{ `{{ $labels }}` }}"

View File

@@ -96,13 +96,47 @@ tubearchivist:
devic.es/tun: "1" devic.es/tun: "1"
requests: requests:
devic.es/tun: "1" devic.es/tun: "1"
metrics:
type: deployment
replicas: 1
strategy: Recreate
containers:
main:
image:
repository: quay.io/prometheuscommunity/elasticsearch-exporter
tag: v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
args:
- '--es.uri=https://elasticsearch-tubearchivist-es-http.tubearchivist:9200'
resources:
requests:
cpu: 1m
memory: 10Mi
service:
main:
controller: main
forceRename: tubearchivist
ports:
http:
port: 80
targetPort: 24000
metrics:
controller: metrics
ports:
metrics:
port: 9114
targetPort: 9114
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: tubearchivist-metrics
app.kubernetes.io/instance: tubearchivist-metrics
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
route:
main:
kind: HTTPRoute