---
# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml
# Rendered Helm output: kubelet/node alerting rules from the kubernetes-mixin,
# packaged as a PrometheusRule CR consumed by the Prometheus Operator.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: kube-prometheus-stack-kubernetes-system-kubelet
  namespace: kube-prometheus-stack
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: kube-prometheus-stack
    app.kubernetes.io/version: "79.11.0"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-79.11.0
    release: "kube-prometheus-stack"
    heritage: "Helm"
spec:
  groups:
    - name: kubernetes-system-kubelet
      rules:
        # Node reports Ready=false (excludes cordoned nodes).
        - alert: KubeNodeNotReady
          annotations:
            description: '{{ $labels.node }} has been unready for more than 15 minutes on cluster {{ $labels.cluster }}.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodenotready
            summary: Node is not ready.
          expr: |-
            kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0
            and on (cluster, node)
            kube_node_spec_unschedulable{job="kube-state-metrics"} == 0
          for: 15m
          labels:
            severity: warning
        # Node has Memory/Disk/PID pressure (excludes cordoned nodes).
        - alert: KubeNodePressure
          annotations:
            description: '{{ $labels.node }} on cluster {{ $labels.cluster }} has active Condition {{ $labels.condition }}. This is caused by resource usage exceeding eviction thresholds.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodepressure
            summary: Node has as active Condition.
          expr: |-
            kube_node_status_condition{job="kube-state-metrics",condition=~"(MemoryPressure|DiskPressure|PIDPressure)",status="true"} == 1
            and on (cluster, node)
            kube_node_spec_unschedulable{job="kube-state-metrics"} == 0
          for: 10m
          labels:
            severity: info
        # Node tainted unreachable; taints from expected terminations
        # (autoscaler scale-down, spot/preemption handlers) are excluded.
        - alert: KubeNodeUnreachable
          annotations:
            description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled on cluster {{ $labels.cluster }}.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodeunreachable
            summary: Node is unreachable.
          expr: (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1
          for: 15m
          labels:
            severity: warning
        # Running pod count vs. node pod capacity; joins kubelet instance
        # to node name, then divides by kube-state-metrics capacity.
        - alert: KubeletTooManyPods
          annotations:
            description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubelettoomanypods
            summary: Kubelet is running at capacity.
          expr: |-
            (
              max by (cluster, instance) (
                kubelet_running_pods{job="kubelet", metrics_path="/metrics"} > 1
              )
              * on (cluster, instance) group_left(node)
              max by (cluster, instance, node) (
                kubelet_node_name{job="kubelet", metrics_path="/metrics"}
              )
            )
            / on (cluster, node) group_left()
            max by (cluster, node) (
              kube_node_status_capacity{job="kube-state-metrics", resource="pods"} != 1
            ) > 0.95
          for: 15m
          labels:
            severity: info
        # Ready condition flipped more than twice in 15m (excludes cordoned nodes).
        - alert: KubeNodeReadinessFlapping
          annotations:
            description: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodereadinessflapping
            summary: Node readiness status is flapping.
          expr: |-
            sum(changes(kube_node_status_condition{job="kube-state-metrics",status="true",condition="Ready"}[15m])) by (cluster, node) > 2
            and on (cluster, node)
            kube_node_spec_unschedulable{job="kube-state-metrics"} == 0
          for: 15m
          labels:
            severity: warning
        # Any kubelet eviction activity in the last 15m; fires immediately (for: 0s).
        - alert: KubeNodeEviction
          annotations:
            description: Node {{ $labels.node }} on {{ $labels.cluster }} is evicting Pods due to {{ $labels.eviction_signal }}. Eviction occurs when eviction thresholds are crossed, typically caused by Pods exceeding RAM/ephemeral-storage limits.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodeeviction
            summary: Node is evicting pods.
          expr: |-
            sum(rate(kubelet_evictions{job="kubelet", metrics_path="/metrics"}[15m])) by (cluster, eviction_signal, instance)
            * on (cluster, instance) group_left(node)
            max by (cluster, instance, node) (
              kubelet_node_name{job="kubelet", metrics_path="/metrics"}
            )
            > 0
          for: 0s
          labels:
            severity: info
        # PLEG relist p99 >= 10s (recording rule computed elsewhere in the stack).
        - alert: KubeletPlegDurationHigh
          annotations:
            description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }} on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletplegdurationhigh
            summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist.
          expr: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10
          for: 5m
          labels:
            severity: warning
        # Pod-worker p99 latency > 60s; topk(1, ...) dedupes series before aggregation.
        - alert: KubeletPodStartUpLatencyHigh
          annotations:
            description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }} on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletpodstartuplatencyhigh
            summary: Kubelet Pod startup latency is too high.
          expr: |-
            histogram_quantile(0.99,
              sum by (cluster, instance, le) (
                topk by (cluster, instance, le, operation_type) (1,
                  rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])
                )
              )
            )
            * on (cluster, instance) group_left(node)
            topk by (cluster, instance, node) (1,
              kubelet_node_name{job="kubelet", metrics_path="/metrics"}
            )
            > 60
          for: 15m
          labels:
            severity: warning
        # Client cert TTL < 7 days (604800s) — warning tier.
        - alert: KubeletClientCertificateExpiration
          annotations:
            description: Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration
            summary: Kubelet client certificate is about to expire.
          expr: kubelet_certificate_manager_client_ttl_seconds < 604800
          labels:
            severity: warning
        # Client cert TTL < 1 day (86400s) — critical tier (same alert name, tighter threshold).
        - alert: KubeletClientCertificateExpiration
          annotations:
            description: Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration
            summary: Kubelet client certificate is about to expire.
          expr: kubelet_certificate_manager_client_ttl_seconds < 86400
          labels:
            severity: critical
        # Server cert TTL < 7 days — warning tier.
        - alert: KubeletServerCertificateExpiration
          annotations:
            description: Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration
            summary: Kubelet server certificate is about to expire.
          expr: kubelet_certificate_manager_server_ttl_seconds < 604800
          labels:
            severity: warning
        # Server cert TTL < 1 day — critical tier.
        - alert: KubeletServerCertificateExpiration
          annotations:
            description: Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration
            summary: Kubelet server certificate is about to expire.
          expr: kubelet_certificate_manager_server_ttl_seconds < 86400
          labels:
            severity: critical
        # Any client-cert renewal errors over the last 5m, sustained 15m.
        - alert: KubeletClientCertificateRenewalErrors
          annotations:
            description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes) on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificaterenewalerrors
            summary: Kubelet has failed to renew its client certificate.
          expr: increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0
          for: 15m
          labels:
            severity: warning
        # Any server-cert renewal errors over the last 5m, sustained 15m.
        - alert: KubeletServerCertificateRenewalErrors
          annotations:
            description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes) on cluster {{ $labels.cluster }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificaterenewalerrors
            summary: Kubelet has failed to renew its server certificate.
          expr: increase(kubelet_server_expiration_renew_errors[5m]) > 0
          for: 15m
          labels:
            severity: warning
        # No kubelet targets at all in Prometheus service discovery.
        - alert: KubeletDown
          annotations:
            description: Kubelet has disappeared from Prometheus target discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletdown
            summary: Target disappeared from Prometheus target discovery.
          expr: absent(up{job="kubelet", metrics_path="/metrics"})
          for: 15m
          labels:
            severity: critical