chore: Update manifests after change

Committed 2026-04-27 18:56:25 +00:00
parent a66db97dab
commit 31b0cd61b7
23 changed files with 1028 additions and 15 deletions


@@ -0,0 +1,125 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: haproxy
namespace: argocd
labels:
app.kubernetes.io/name: haproxy
app.kubernetes.io/instance: argocd
app.kubernetes.io/part-of: argocd
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: HAProxyHighHTTP4xxErrorRateBackend
expr: ((sum by (proxy) (rate(haproxy_server_http_responses_total{code="4xx"}[1m])) / sum by (proxy) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (proxy) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 4xx error rate backend (instance {{ $labels.instance }})
description: "Too many HTTP requests with status 4xx (> 5%) on backend {{ $labels.proxy }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyHighHTTP5xxErrorRateBackend
expr: ((sum by (proxy) (rate(haproxy_server_http_responses_total{code="5xx"}[1m])) / sum by (proxy) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (proxy) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 5xx error rate backend (instance {{ $labels.instance }})
description: "Too many HTTP requests with status 5xx (> 5%) on backend {{ $labels.proxy }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyHighHTTP4xxErrorRateServer
expr: ((sum by (server) (rate(haproxy_server_http_responses_total{code="4xx"}[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 4xx error rate server (instance {{ $labels.instance }})
description: "Too many HTTP requests with status 4xx (> 5%) on server {{ $labels.server }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyHighHTTP5xxErrorRateServer
expr: ((sum by (server) (rate(haproxy_server_http_responses_total{code="5xx"}[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100) > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy high HTTP 5xx error rate server (instance {{ $labels.instance }})
description: "Too many HTTP requests with status 5xx (> 5%) on server {{ $labels.server }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyServerResponseErrors
expr: (sum by (server) (rate(haproxy_server_response_errors_total[1m])) / sum by (server) (rate(haproxy_server_http_responses_total[1m]))) * 100 > 5 and sum by (server) (rate(haproxy_server_http_responses_total[1m])) > 0
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy server response errors (instance {{ $labels.instance }})
description: "Too many response errors to {{ $labels.server }} server (> 5%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyBackendConnectionErrors
expr: (sum by (proxy) (rate(haproxy_backend_connection_errors_total[1m]))) > 100
for: 1m
labels:
severity: critical
annotations:
summary: HAProxy backend connection errors (instance {{ $labels.instance }})
description: "Too many connection errors to {{ $labels.proxy }} backend (> 100 req/s). Request throughput may be too high.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyServerConnectionErrors
expr: (sum by (proxy) (rate(haproxy_server_connection_errors_total[1m]))) > 100
for: 0m
labels:
severity: critical
annotations:
summary: HAProxy server connection errors (instance {{ $labels.instance }})
description: "Too many connection errors to {{ $labels.proxy }} (> 100 req/s). Request throughput may be too high.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyBackendMaxActiveSession
expr: (haproxy_backend_current_sessions / haproxy_backend_limit_sessions * 100) > 80 and haproxy_backend_limit_sessions > 0
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy backend max active session > 80% (instance {{ $labels.instance }})
description: "Session limit from backend {{ $labels.proxy }} reached 80% of limit - {{ $value | printf \"%.2f\"}}%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyPendingRequests
expr: sum by (proxy) (haproxy_backend_current_queue) > 0
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy pending requests (instance {{ $labels.instance }})
description: "Some HAProxy requests are pending on {{ $labels.proxy }} - {{ $value | printf \"%.2f\"}}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyHTTPSlowingDown
expr: avg by (instance, proxy) (haproxy_backend_max_total_time_seconds) > 1
for: 1m
labels:
severity: warning
annotations:
summary: HAProxy HTTP slowing down (instance {{ $labels.instance }})
description: "HAProxy backend max total time is above 1s on {{ $labels.proxy }} - {{ $value | printf \"%.2f\"}}s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyRetryHigh
expr: sum by (proxy) (rate(haproxy_backend_retry_warnings_total[1m])) > 10
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy retry high (instance {{ $labels.instance }})
description: "High rate of retry on {{ $labels.proxy }} - {{ $value | printf \"%.2f\"}}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyHasNoAliveBackends
expr: haproxy_backend_active_servers + haproxy_backend_backup_servers == 0
for: 0m
labels:
severity: critical
annotations:
summary: HAProxy has no alive backends (instance {{ $labels.instance }})
description: "HAProxy has no alive active or backup backends for {{ $labels.proxy }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyFrontendSecurityBlockedRequests
expr: sum by (proxy) (rate(haproxy_frontend_denied_connections_total[2m])) > 10
for: 2m
labels:
severity: warning
annotations:
summary: HAProxy frontend security blocked requests (instance {{ $labels.instance }})
description: "HAProxy is blocking requests for security reason\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HAProxyServerHealthcheckFailure
expr: increase(haproxy_server_check_failures_total[1m]) > 2
for: 0m
labels:
severity: warning
annotations:
summary: HAProxy server healthcheck failure (instance {{ $labels.instance }})
description: "Some server healthcheck are failing on {{ $labels.server }} ({{ $value }} in the last 1m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"


@@ -0,0 +1,45 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: cert-manager
namespace: cert-manager
labels:
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/part-of: cert-manager
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: Cert-ManagerAbsent
expr: absent(up{job="cert-manager"})
for: 10m
labels:
severity: critical
annotations:
summary: Cert-Manager absent (instance {{ $labels.instance }})
description: "Cert-Manager has disappeared from Prometheus service discovery. New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: Cert-ManagerCertificateExpiringSoon
expr: avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600)
for: 1h
labels:
severity: warning
annotations:
summary: Cert-Manager certificate expiring soon (instance {{ $labels.instance }})
description: "The certificate {{ $labels.name }} is expiring in less than 21 days.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: Cert-ManagerCertificateNotReady
expr: max by (name, exported_namespace, namespace, condition) (certmanager_certificate_ready_status{condition!="True"} == 1)
for: 10m
labels:
severity: critical
annotations:
summary: Cert-Manager certificate not ready (instance {{ $labels.instance }})
description: "The certificate {{ $labels.name }} in namespace {{ $labels.exported_namespace }} is not ready to serve traffic.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: Cert-ManagerHittingACMERateLimits
expr: sum by (host) (rate(certmanager_acme_client_request_count{status="429"}[5m])) > 0
for: 5m
labels:
severity: critical
annotations:
summary: Cert-Manager hitting ACME rate limits (instance {{ $labels.instance }})
description: "Cert-Manager is being rate-limited by the ACME provider. Certificate issuance and renewal may be blocked for up to a week.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"


@@ -0,0 +1,29 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: meilisearch
namespace: gitea
labels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: gitea
app.kubernetes.io/part-of: gitea
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: MeilisearchIndexIsEmpty
expr: meilisearch_index_docs_count == 0
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch index is empty (instance {{ $labels.instance }})
description: "Meilisearch index {{ $labels.index }} has zero documents\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: MeilisearchHttpResponseTime
expr: meilisearch_http_response_time_seconds > 0.5
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch HTTP response time high (instance {{ $labels.instance }})
description: "Meilisearch HTTP response time is above 0.5s\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"


@@ -0,0 +1,29 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: meilisearch
namespace: jellyfin
labels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: jellyfin
app.kubernetes.io/part-of: jellyfin
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: MeilisearchIndexIsEmpty
expr: meilisearch_index_docs_count == 0
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch index is empty (instance {{ $labels.instance }})
description: "Meilisearch index {{ $labels.index }} has zero documents\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: MeilisearchHttpResponseTime
expr: meilisearch_http_response_time_seconds > 0.5
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch HTTP response time high (instance {{ $labels.instance }})
description: "Meilisearch HTTP response time is above 0.5s\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"


@@ -0,0 +1,29 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: meilisearch
namespace: karakeep
labels:
app.kubernetes.io/name: meilisearch
app.kubernetes.io/instance: karakeep
app.kubernetes.io/part-of: karakeep
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: MeilisearchIndexIsEmpty
expr: meilisearch_index_docs_count == 0
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch index is empty (instance {{ $labels.instance }})
description: "Meilisearch index {{ $labels.index }} has zero documents\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: MeilisearchHttpResponseTime
expr: meilisearch_http_response_time_seconds > 0.5
for: 0m
labels:
severity: warning
annotations:
summary: Meilisearch HTTP response time high (instance {{ $labels.instance }})
description: "Meilisearch HTTP response time is above 0.5s\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"


@@ -13,17 +13,61 @@ spec:
groups:
- name: openbao
rules:
- alert: OpenBao-HighResponseTime
annotations:
message: The response time of OpenBao is over 500ms on average over the last 5 minutes.
expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500
for: 5m
labels:
severity: warning
- alert: OpenBao-HighResponseTime
annotations:
message: The response time of OpenBao is over 1s on average over the last 5 minutes.
expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000
for: 5m
labels:
severity: critical
- alert: OpenBao-Sealed
annotations:
description: |-
OpenBao instance is sealed on {{ $labels.instance }}
VALUE = {{ $value }}
LABELS = {{ $labels }}
summary: OpenBao sealed (instance {{ $labels.instance }})
expr: vault_core_unsealed == 0
for: 1m
labels:
severity: critical
- alert: OpenBao-TooManyPendingTokens
annotations:
description: |-
Too many pending tokens on {{ $labels.instance }}: {{ $value }} tokens created but not yet stored.
VALUE = {{ $value }}
LABELS = {{ $labels }}
summary: OpenBao too many pending tokens (instance {{ $labels.instance }})
expr: avg(vault_token_create_count - vault_token_store_count) > 0
for: 5m
labels:
severity: warning
- alert: OpenBao-TooManyInfinityTokens
annotations:
description: |-
Too many non-expiring tokens on {{ $labels.instance }}: {{ $value }} tokens with infinite TTL.
VALUE = {{ $value }}
LABELS = {{ $labels }}
summary: OpenBao too many non-expiring tokens (instance {{ $labels.instance }})
expr: vault_token_count_by_ttl{creation_ttl="+Inf"} > 3
for: 5m
labels:
severity: warning
- alert: OpenBao-ClusterHealth
annotations:
description: |-
OpenBao cluster is not healthy: only {{ $value | humanizePercentage }} of nodes are active.
VALUE = {{ $value }}
LABELS = {{ $labels }}
summary: OpenBao cluster unhealthy (instance {{ $labels.instance }})
expr: sum(vault_core_active) / count(vault_core_active) <= 0.5 and count(vault_core_active) > 0
for: 0m
labels:
severity: critical


@@ -11,15 +11,15 @@ metadata:
data:
enable_json.xml: |
<clickhouse>
<settings>
<enable_json_type>1</enable_json_type>
</settings>
</clickhouse>
logging_rules.xml: |
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
@@ -32,9 +32,21 @@ data:
<latency_log remove="remove"/>
<processors_profile_log remove="remove"/>
</clickhouse>
metrics.xml: |
<clickhouse>
<prometheus>
<endpoint>/metrics</endpoint>
<port>9363</port>
<metrics>true</metrics>
<events>true</events>
<asynchronous_metrics>true</asynchronous_metrics>
<errors>true</errors>
</prometheus>
</clickhouse>
network.xml: |
<clickhouse>
<listen_host>0.0.0.0</listen_host>
<http_port>8123</http_port>
</clickhouse>
user_logging.xml: |
<clickhouse>


@@ -22,7 +22,7 @@ spec:
template:
metadata:
annotations:
checksum/configMaps: ba4b6ef840b78e95c76ebd6d10e0cc8536bd139037769a25c40c76f1eceb029d
labels:
app.kubernetes.io/controller: backend
app.kubernetes.io/instance: rybbit


@@ -22,7 +22,7 @@ spec:
template:
metadata:
annotations:
checksum/configMaps: ba4b6ef840b78e95c76ebd6d10e0cc8536bd139037769a25c40c76f1eceb029d
labels:
app.kubernetes.io/controller: clickhouse
app.kubernetes.io/instance: rybbit
@@ -87,6 +87,11 @@ spec:
name: clickhouse-config
readOnly: true
subPath: user_logging.xml
- mountPath: /etc/clickhouse-server/config.d/metrics.xml
mountPropagation: None
name: clickhouse-config
readOnly: true
subPath: metrics.xml
volumes:
- name: clickhouse
persistentVolumeClaim:


@@ -22,7 +22,7 @@ spec:
template:
metadata:
annotations:
checksum/configMaps: ba4b6ef840b78e95c76ebd6d10e0cc8536bd139037769a25c40c76f1eceb029d
labels:
app.kubernetes.io/controller: client
app.kubernetes.io/instance: rybbit


@@ -0,0 +1,157 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: clickhouse
namespace: rybbit
labels:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: rybbit
app.kubernetes.io/part-of: rybbit
spec:
groups:
- name: EmbeddedExporter
rules:
- alert: ClickHouseNodeDown
expr: up{job="clickhouse"} == 0
for: 2m
labels:
severity: critical
annotations:
summary: ClickHouse node down (instance {{ $labels.instance }})
description: "No metrics received from ClickHouse exporter for over 2 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseMemoryUsageCritical
expr: ClickHouseAsyncMetrics_CGroupMemoryUsed / ClickHouseAsyncMetrics_CGroupMemoryTotal * 100 > 90 and ClickHouseAsyncMetrics_CGroupMemoryTotal > 0
for: 5m
labels:
severity: critical
annotations:
summary: ClickHouse Memory Usage Critical (instance {{ $labels.instance }})
description: "Memory usage is critically high, over 90%.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseMemoryUsageWarning
expr: ClickHouseAsyncMetrics_CGroupMemoryUsed / ClickHouseAsyncMetrics_CGroupMemoryTotal * 100 > 80 and ClickHouseAsyncMetrics_CGroupMemoryTotal > 0
for: 5m
labels:
severity: warning
annotations:
summary: ClickHouse Memory Usage Warning (instance {{ $labels.instance }})
description: "Memory usage is over 80%.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseDiskSpaceLowOnDefault
expr: ClickHouseAsyncMetrics_DiskAvailable_default / (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) * 100 < 20 and (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) > 0
for: 2m
labels:
severity: warning
annotations:
summary: ClickHouse Disk Space Low on Default (instance {{ $labels.instance }})
description: "Disk space on default is below 20%.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseDiskSpaceCriticalOnDefault
expr: ClickHouseAsyncMetrics_DiskAvailable_default / (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) * 100 < 10 and (ClickHouseAsyncMetrics_DiskAvailable_default + ClickHouseAsyncMetrics_DiskUsed_default) > 0
for: 2m
labels:
severity: critical
annotations:
summary: ClickHouse Disk Space Critical on Default (instance {{ $labels.instance }})
description: "Disk space on default disk is critically low, below 10%.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseDiskSpaceLowOnBackups
expr: ClickHouseAsyncMetrics_DiskAvailable_backups / (ClickHouseAsyncMetrics_DiskAvailable_backups + ClickHouseAsyncMetrics_DiskUsed_backups) * 100 < 20 and (ClickHouseAsyncMetrics_DiskAvailable_backups + ClickHouseAsyncMetrics_DiskUsed_backups) > 0
for: 2m
labels:
severity: warning
annotations:
summary: ClickHouse Disk Space Low on Backups (instance {{ $labels.instance }})
description: "Disk space on backups is below 20%.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseReplicaErrors
expr: ClickHouseErrorMetric_ALL_REPLICAS_ARE_STALE == 1 or ClickHouseErrorMetric_ALL_REPLICAS_LOST == 1
for: 0m
labels:
severity: critical
annotations:
summary: ClickHouse Replica Errors (instance {{ $labels.instance }})
description: "Critical replica errors detected, either all replicas are stale or lost.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseNoAvailableReplicas
expr: ClickHouseErrorMetric_NO_AVAILABLE_REPLICA == 1
for: 0m
labels:
severity: critical
annotations:
summary: ClickHouse No Available Replicas (instance {{ $labels.instance }})
description: "No available replicas in ClickHouse.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseNoLiveReplicas
expr: ClickHouseErrorMetric_TOO_FEW_LIVE_REPLICAS == 1
for: 0m
labels:
severity: critical
annotations:
summary: ClickHouse No Live Replicas (instance {{ $labels.instance }})
description: "There are too few live replicas available, risking data loss and service disruption.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseHighTCPConnections
expr: ClickHouseMetrics_TCPConnection > 400
for: 5m
labels:
severity: warning
annotations:
summary: ClickHouse High TCP Connections (instance {{ $labels.instance }})
description: "High number of TCP connections, indicating heavy client or inter-cluster communication.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseInterserverConnectionIssues
expr: ClickHouseMetrics_InterserverConnection > 50
for: 5m
labels:
severity: warning
annotations:
summary: ClickHouse Interserver Connection Issues (instance {{ $labels.instance }})
description: "High number of interserver connections may indicate replication or distributed query handling issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseZooKeeperConnectionIssues
expr: ClickHouseMetrics_ZooKeeperSession != 1
for: 3m
labels:
severity: warning
annotations:
summary: ClickHouse ZooKeeper Connection Issues (instance {{ $labels.instance }})
description: "ClickHouse is experiencing issues with ZooKeeper connections, which may affect cluster state and coordination.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseAuthenticationFailures
expr: increase(ClickHouseErrorMetric_AUTHENTICATION_FAILED[5m]) > 3
for: 0m
labels:
severity: info
annotations:
summary: ClickHouse Authentication Failures (instance {{ $labels.instance }})
description: "Authentication failures detected, indicating potential security issues or misconfiguration.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseAccessDeniedErrors
expr: increase(ClickHouseErrorMetric_RESOURCE_ACCESS_DENIED[5m]) > 3
for: 0m
labels:
severity: info
annotations:
summary: ClickHouse Access Denied Errors (instance {{ $labels.instance }})
description: "Access denied errors have been logged, which could indicate permission issues or unauthorized access attempts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseRejectedInsertQueries
expr: increase(ClickHouseProfileEvents_RejectedInserts[1m]) > 2
for: 1m
labels:
severity: warning
annotations:
summary: ClickHouse rejected insert queries (instance {{ $labels.instance }})
description: "INSERTs rejected due to too many active data parts. Reduce insert frequency.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseDelayedInsertQueries
expr: increase(ClickHouseProfileEvents_DelayedInserts[5m]) > 10
for: 2m
labels:
severity: warning
annotations:
summary: ClickHouse delayed insert queries (instance {{ $labels.instance }})
description: "INSERTs delayed due to high number of active parts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ClickHouseZookeeperHardwareException
expr: increase(ClickHouseProfileEvents_ZooKeeperHardwareExceptions[1m]) > 0
for: 1m
labels:
severity: critical
annotations:
summary: ClickHouse ZooKeeper hardware exception (instance {{ $labels.instance }})
description: "ZooKeeper hardware exception: network issues communicating with ZooKeeper\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
- alert: ClickHouseDistributedRejectedInserts
expr: increase(ClickHouseProfileEvents_DistributedRejectedInserts[5m]) > 3
for: 2m
labels:
severity: critical
annotations:
summary: ClickHouse distributed rejected inserts (instance {{ $labels.instance }})
description: "INSERTs into Distributed tables rejected due to pending bytes limit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"


@@ -16,6 +16,10 @@ spec:
targetPort: 8123
protocol: TCP
name: http
- port: 9363
targetPort: 9363
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: clickhouse
app.kubernetes.io/instance: rybbit


@@ -0,0 +1,24 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: rybbit
labels:
app.kubernetes.io/instance: rybbit
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: rybbit
helm.sh/chart: rybbit-4.6.2
namespace: rybbit
spec:
jobLabel: rybbit
namespaceSelector:
matchNames:
- rybbit
selector:
matchLabels:
app.kubernetes.io/service: rybbit-clickhouse
endpoints:
- interval: 30s
path: /metrics
port: metrics
scrapeTimeout: 10s


@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stalwart-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart


@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stalwart-metrics
labels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
template:
metadata:
labels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- --es.uri=https://elasticsearch-stalwart-es-http.stalwart:9200
image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
name: main
resources:
requests:
cpu: 1m
memory: 10Mi


@@ -0,0 +1,165 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: elasticsearch
namespace: stalwart
labels:
app.kubernetes.io/name: elasticsearch
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
groups:
- name: ElasticsearchExporter
rules:
- alert: ElasticsearchHeapUsageTooHigh
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch Heap Usage Too High (instance {{ $labels.instance }})
description: "The heap usage is over 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHeapUsageWarning
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch Heap Usage warning (instance {{ $labels.instance }})
description: "The heap usage is over 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchDiskOutOfSpace
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch disk out of space (instance {{ $labels.instance }})
description: "The disk usage is over 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchDiskSpaceLow
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch disk space low (instance {{ $labels.instance }})
description: "The disk usage is over 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchClusterRed
expr: elasticsearch_cluster_health_status{color="red"} == 1
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch Cluster Red (instance {{ $labels.instance }})
description: "Elastic Cluster Red status\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchClusterYellow
expr: elasticsearch_cluster_health_status{color="yellow"} == 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch Cluster Yellow (instance {{ $labels.instance }})
description: "Elastic Cluster Yellow status\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHealthyNodes
expr: elasticsearch_cluster_health_number_of_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Nodes (instance {{ $labels.instance }})
description: "Missing node in Elasticsearch cluster\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHealthyDataNodes
expr: elasticsearch_cluster_health_number_of_data_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Data Nodes (instance {{ $labels.instance }})
description: "Missing data node in Elasticsearch cluster\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchRelocatingShards
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch relocating shards (instance {{ $labels.instance }})
description: "Elasticsearch is relocating shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchRelocatingShardsTooLong
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch relocating shards too long (instance {{ $labels.instance }})
description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchInitializingShards
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch initializing shards (instance {{ $labels.instance }})
description: "Elasticsearch is initializing shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchInitializingShardsTooLong
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch initializing shards too long (instance {{ $labels.instance }})
description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchUnassignedShards
expr: elasticsearch_cluster_health_unassigned_shards > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch unassigned shards (instance {{ $labels.instance }})
description: "Elasticsearch has unassigned shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchPendingTasks
expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch pending tasks (instance {{ $labels.instance }})
description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchNoNewDocuments
expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch no new documents (instance {{ $labels.instance }})
description: "No new documents for 10 min!\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighIndexingLatency
expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
for: 10m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Latency (instance {{ $labels.instance }})
description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ $value }}s).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighIndexingRate
expr: sum(rate(elasticsearch_indices_indexing_index_total[1m])) > 10000
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Rate (instance {{ $labels.instance }})
description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighQueryRate
expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Rate (instance {{ $labels.instance }})
description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighQueryLatency
expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Latency (instance {{ $labels.instance }})
description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ $value }}s).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"


@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: stalwart-metrics
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
app.kubernetes.io/service: stalwart-metrics
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
type: ClusterIP
ports:
- port: 9114
targetPort: 9114
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart


@@ -0,0 +1,24 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: stalwart
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.6.2
namespace: stalwart
spec:
jobLabel: stalwart
namespaceSelector:
matchNames:
- stalwart
selector:
matchLabels:
app.kubernetes.io/service: stalwart-metrics
endpoints:
- interval: 30s
path: /metrics
port: metrics
scrapeTimeout: 10s


@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: tubearchivist-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: tubearchivist


@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: tubearchivist-metrics
labels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.6.2
namespace: tubearchivist
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/instance: tubearchivist
template:
metadata:
labels:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- --es.uri=https://elasticsearch-tubearchivist-es-http.tubearchivist:9200
image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.10.0@sha256:a6a4d4403f670faf6a94b8c7f9adbca3ead91f26dd64e5ccf95fa69025dc6e58
name: main
resources:
requests:
cpu: 1m
memory: 10Mi


@@ -0,0 +1,165 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: elasticsearch
namespace: tubearchivist
labels:
app.kubernetes.io/name: elasticsearch
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/part-of: tubearchivist
spec:
groups:
- name: ElasticsearchExporter
rules:
- alert: ElasticsearchHeapUsageTooHigh
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch Heap Usage Too High (instance {{ $labels.instance }})
description: "The heap usage is over 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHeapUsageWarning
expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80 and elasticsearch_jvm_memory_max_bytes{area="heap"} > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch Heap Usage warning (instance {{ $labels.instance }})
description: "The heap usage is over 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchDiskOutOfSpace
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10 and elasticsearch_filesystem_data_size_bytes > 0
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch disk out of space (instance {{ $labels.instance }})
description: "The disk usage is over 90%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchDiskSpaceLow
expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20 and elasticsearch_filesystem_data_size_bytes > 0
for: 2m
labels:
severity: warning
annotations:
summary: Elasticsearch disk space low (instance {{ $labels.instance }})
description: "The disk usage is over 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchClusterRed
expr: elasticsearch_cluster_health_status{color="red"} == 1
for: 0m
labels:
severity: critical
annotations:
summary: Elasticsearch Cluster Red (instance {{ $labels.instance }})
description: "Elastic Cluster Red status\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchClusterYellow
expr: elasticsearch_cluster_health_status{color="yellow"} == 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch Cluster Yellow (instance {{ $labels.instance }})
description: "Elastic Cluster Yellow status\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHealthyNodes
expr: elasticsearch_cluster_health_number_of_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Nodes (instance {{ $labels.instance }})
description: "Missing node in Elasticsearch cluster\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHealthyDataNodes
expr: elasticsearch_cluster_health_number_of_data_nodes < 3
for: 1m
labels:
severity: critical
annotations:
summary: Elasticsearch Healthy Data Nodes (instance {{ $labels.instance }})
description: "Missing data node in Elasticsearch cluster\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchRelocatingShards
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch relocating shards (instance {{ $labels.instance }})
description: "Elasticsearch is relocating shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchRelocatingShardsTooLong
expr: elasticsearch_cluster_health_relocating_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch relocating shards too long (instance {{ $labels.instance }})
description: "Elasticsearch has been relocating shards for 15min\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchInitializingShards
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 0m
labels:
severity: info
annotations:
summary: Elasticsearch initializing shards (instance {{ $labels.instance }})
description: "Elasticsearch is initializing shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchInitializingShardsTooLong
expr: elasticsearch_cluster_health_initializing_shards > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch initializing shards too long (instance {{ $labels.instance }})
description: "Elasticsearch has been initializing shards for 15 min\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchUnassignedShards
expr: elasticsearch_cluster_health_unassigned_shards > 0
for: 2m
labels:
severity: critical
annotations:
summary: Elasticsearch unassigned shards (instance {{ $labels.instance }})
description: "Elasticsearch has unassigned shards\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchPendingTasks
expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
for: 15m
labels:
severity: warning
annotations:
summary: Elasticsearch pending tasks (instance {{ $labels.instance }})
description: "Elasticsearch has pending tasks. Cluster works slowly.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchNoNewDocuments
expr: increase(elasticsearch_indices_indexing_index_total{es_data_node="true"}[10m]) < 1
for: 0m
labels:
severity: warning
annotations:
summary: Elasticsearch no new documents (instance {{ $labels.instance }})
description: "No new documents for 10 min!\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighIndexingLatency
expr: rate(elasticsearch_indices_indexing_index_time_seconds_total[5m]) / rate(elasticsearch_indices_indexing_index_total[5m]) > 0.01 and rate(elasticsearch_indices_indexing_index_total[5m]) > 0
for: 10m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Latency (instance {{ $labels.instance }})
description: "The indexing latency on Elasticsearch cluster is higher than the threshold (current value: {{ $value }}s).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighIndexingRate
expr: sum(rate(elasticsearch_indices_indexing_index_total[1m])) > 10000
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Indexing Rate (instance {{ $labels.instance }})
description: "The indexing rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighQueryRate
expr: sum(rate(elasticsearch_indices_search_query_total[1m])) > 100
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Rate (instance {{ $labels.instance }})
description: "The query rate on Elasticsearch cluster is higher than the threshold.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: ElasticsearchHighQueryLatency
expr: rate(elasticsearch_indices_search_query_time_seconds[1m]) / rate(elasticsearch_indices_search_query_total[1m]) > 1 and rate(elasticsearch_indices_search_query_total[1m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: Elasticsearch High Query Latency (instance {{ $labels.instance }})
description: "The query latency on Elasticsearch cluster is higher than the threshold (current value: {{ $value }}s).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"


@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: tubearchivist-metrics
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
app.kubernetes.io/service: tubearchivist-metrics
helm.sh/chart: tubearchivist-4.6.2
namespace: tubearchivist
spec:
type: ClusterIP
ports:
- port: 9114
targetPort: 9114
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: metrics
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/name: tubearchivist


@@ -0,0 +1,24 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: tubearchivist
labels:
app.kubernetes.io/instance: tubearchivist
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tubearchivist
helm.sh/chart: tubearchivist-4.6.2
namespace: tubearchivist
spec:
jobLabel: tubearchivist
namespaceSelector:
matchNames:
- tubearchivist
selector:
matchLabels:
app.kubernetes.io/service: tubearchivist-metrics
endpoints:
- interval: 30s
path: /metrics
port: metrics
scrapeTimeout: 10s