From 845605bf55e2006988b45083ca4dd3d9a936d212 Mon Sep 17 00:00:00 2001 From: gitea-bot Date: Tue, 2 Dec 2025 02:20:58 +0000 Subject: [PATCH] chore: Update manifests after change --- .../argo-workflows/argo-workflows.yaml | 4999 +++++++ clusters/cl01tl/manifests/gatus/gatus.yaml | 1527 +++ .../grafana-operator/grafana-operator.yaml | 10862 ++++++++++++++++ .../cl01tl/manifests/headlamp/headlamp.yaml | 308 + clusters/cl01tl/manifests/komodo/komodo.yaml | 945 ++ clusters/cl01tl/manifests/kronic/kronic.yaml | 228 + .../kube-prometheus-stack.yaml | 5728 ++++++++ clusters/cl01tl/manifests/loki/loki.yaml | 1535 +++ .../manifests/s3-exporter/s3-exporter.yaml | 559 + .../manifests/shelly-plug/shelly-plug.yaml | 185 + clusters/cl01tl/manifests/trivy/trivy.yaml | 5222 ++++++++ .../cl01tl/manifests/unpoller/unpoller.yaml | 151 + 12 files changed, 32249 insertions(+) create mode 100644 clusters/cl01tl/manifests/argo-workflows/argo-workflows.yaml create mode 100644 clusters/cl01tl/manifests/gatus/gatus.yaml create mode 100644 clusters/cl01tl/manifests/grafana-operator/grafana-operator.yaml create mode 100644 clusters/cl01tl/manifests/headlamp/headlamp.yaml create mode 100644 clusters/cl01tl/manifests/komodo/komodo.yaml create mode 100644 clusters/cl01tl/manifests/kronic/kronic.yaml create mode 100644 clusters/cl01tl/manifests/kube-prometheus-stack/kube-prometheus-stack.yaml create mode 100644 clusters/cl01tl/manifests/loki/loki.yaml create mode 100644 clusters/cl01tl/manifests/s3-exporter/s3-exporter.yaml create mode 100644 clusters/cl01tl/manifests/shelly-plug/shelly-plug.yaml create mode 100644 clusters/cl01tl/manifests/trivy/trivy.yaml create mode 100644 clusters/cl01tl/manifests/unpoller/unpoller.yaml diff --git a/clusters/cl01tl/manifests/argo-workflows/argo-workflows.yaml b/clusters/cl01tl/manifests/argo-workflows/argo-workflows.yaml new file mode 100644 index 000000000..aee410f71 --- /dev/null +++ b/clusters/cl01tl/manifests/argo-workflows/argo-workflows.yaml @@ -0,0 
+1,4999 @@ +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: argo-workflows-argo-events-controller-manager + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-webhook/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: argo-workflows-argo-events-events-webhook + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: events-webhook + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-workflows-workflow-controller + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +--- +# Source: argo-workflows/charts/argo-workflows/templates/server/server-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-workflows-server + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: 
server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-workflows-argo-events-controller-manager + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +data: + controller-config.yaml: | + eventBus: + nats: + versions: + - version: latest + natsStreamingImage: nats-streaming:latest + metricsExporterImage: natsio/prometheus-nats-exporter:latest + - version: 0.22.1 + natsStreamingImage: nats-streaming:0.22.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.8.0 + jetstream: + # Default JetStream settings, could be overridden by EventBus JetStream specs + settings: | + # https://docs.nats.io/running-a-nats-service/configuration#jetstream + # Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded. 
+ max_memory_store: -1 + max_file_store: -1 + # The default properties of the streams to be created in this JetStream service + streamConfig: | + maxMsgs: 1e+06 + maxAge: 72h + maxBytes: 1GB + replicas: 3 + duplicates: 300s + retention: 0 + discard: 0 + versions: + - version: latest + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server + - version: 2.8.1 + natsImage: nats:2.8.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.1-alpine + natsImage: nats:2.8.1-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.8.2 + natsImage: nats:2.8.2 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.2-alpine + natsImage: nats:2.8.2-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.9.1 + natsImage: nats:2.9.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.12 + natsImage: nats:2.9.12 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.16 + natsImage: nats:2.9.16 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.10.10 + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + 
configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-config-map.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-workflows-workflow-controller-configmap + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-cm + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +data: + config: | + metricsConfig: + enabled: true + path: /metrics + port: 9090 + ignoreErrors: false + secure: false + persistence: + archive: true + connectionPool: + maxIdleConns: 100 + maxOpenConns: 0 + nodeStatusOffLoad: true + postgresql: + database: app + host: argo-workflows-postgresql-17-cluster-rw + passwordSecret: + key: password + name: argo-workflows-postgresql-17-cluster-app + port: 5432 + ssl: false + sslMode: disable + tableName: app + userNameSecret: + key: username + name: argo-workflows-postgresql-17-cluster-app + sso: + issuer: https://authentik.alexlebens.net/application/o/argo-workflows/ + clientId: + name: argo-workflows-oidc-secret + key: client + clientSecret: + name: argo-workflows-oidc-secret + key: secret + redirectUrl: "https://argo-workflows.alexlebens.net/oauth2/callback" + rbac: + enabled: false + scopes: + - openid + - email + - profile + nodeEvents: + enabled: true + workflowEvents: + enabled: true +--- +# Source: argo-workflows/charts/argo-events/templates/crds/eventbus-crd.yml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: eventbus.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: EventBus + listKind: EventBusList + plural: eventbus + shortNames: + - eb + singular: eventbus + scope: Namespaced + versions: + - name: 
v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: argo-workflows/charts/argo-events/templates/crds/eventsource-crd.yml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: eventsources.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: EventSource + listKind: EventSourceList + plural: eventsources + shortNames: + - es + singular: eventsource + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: argo-workflows/charts/argo-events/templates/crds/sensor-crd.yml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sensors.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: Sensor + listKind: SensorList + plural: sensors + shortNames: + - sn + singular: sensor + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# 
Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_clusterworkflowtemplates.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + listKind: ClusterWorkflowTemplateList + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + singular: clusterworkflowtemplate + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_cronworkflows.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cronworkflows.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: CronWorkflow + listKind: CronWorkflowList + plural: cronworkflows + shortNames: + - cwf + - cronwf + singular: cronworkflow + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_workflowartifactgctasks.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowartifactgctasks.argoproj.io + annotations: + 
"helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: WorkflowArtifactGCTask + listKind: WorkflowArtifactGCTaskList + plural: workflowartifactgctasks + shortNames: + - wfat + singular: workflowartifactgctask + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + artifactsByNode: + additionalProperties: + properties: + archiveLocation: + properties: + archiveLogs: + type: boolean + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + 
clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean 
+ required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + raw: + properties: + data: + type: string + required: + - data + type: object + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + type: object + artifacts: + additionalProperties: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + 
additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + 
singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: 
object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + 
x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: object + type: object + type: object + type: object + status: + properties: + artifactResultsByNode: + additionalProperties: + properties: + artifactResults: + additionalProperties: + properties: + error: + type: string + name: + type: string + success: + type: 
boolean + required: + - name + type: object + type: object + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_workfloweventbindings.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workfloweventbindings.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + event: + properties: + selector: + type: string + required: + - selector + type: object + submit: + properties: + arguments: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: 
object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + 
key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + 
properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + generateName: + type: string + labels: + additionalProperties: + 
type: string + type: object + name: + type: string + namespace: + type: string + type: object + workflowTemplateRef: + properties: + clusterScope: + type: boolean + name: + type: string + type: object + required: + - workflowTemplateRef + type: object + required: + - event + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_workflows.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflows.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: Workflow + listKind: WorkflowList + plural: workflows + shortNames: + - wf + singular: workflow + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of the workflow + jsonPath: .status.phase + name: Status + type: string + - description: When the workflow was started + format: date-time + jsonPath: .status.startedAt + name: Age + type: date + - description: Human readable message indicating details about why the workflow + is in this condition. 
+ jsonPath: .status.message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_workflowtaskresults.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtaskresults.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: WorkflowTaskResult + listKind: WorkflowTaskResultList + plural: workflowtaskresults + singular: workflowtaskresult + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + message: + type: string + metadata: + type: object + outputs: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactGC: + properties: + podMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + serviceAccountName: + type: string + strategy: + enum: + - "" + - OnWorkflowCompletion + - OnWorkflowDeletion + - Never + type: string + type: object + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + url: + type: string + 
usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - url + type: object + azure: + properties: + accountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + blob: + type: string + container: + type: string + endpoint: + type: string + useSDKCreds: + type: boolean + required: + - blob + - container + - endpoint + type: object + deleted: + type: boolean + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - key + type: object + git: + properties: + branch: + type: string + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + insecureSkipTLS: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + repo: + type: string + revision: + type: string + singleBranch: + type: boolean + sshPrivateKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string 
+ type: array + dataTransferProtection: + type: string + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbConfigConfigMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbKeytabSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + auth: + properties: + basicAuth: + properties: + passwordSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + usernameSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientCert: + properties: + clientCertSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + oauth2: + properties: + clientIDSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + clientSecretSecret: + properties: + key: + type: string + name: + default: "" + 
type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + items: + properties: + key: + type: string + value: + type: string + required: + - key + type: object + type: array + scopes: + items: + type: string + type: array + tokenURLSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + securityToken: + type: string + useSDKCreds: + type: boolean + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + bucket: + type: string + caSecret: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: string + secretKeySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sessionTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + exitCode: + type: string + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + result: + type: string + type: object + phase: + type: string + progress: + 
type: string + required: + - metadata + type: object + served: true + storage: true +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_workflowtasksets.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtasksets.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: WorkflowTaskSet + listKind: WorkflowTaskSetList + plural: workflowtasksets + shortNames: + - wfts + singular: workflowtaskset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: argo-workflows/charts/argo-workflows/templates/crds/argoproj.io_workflowtemplates.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workflowtemplates.argoproj.io + annotations: + "helm.sh/resource-policy": keep +spec: + group: argoproj.io + names: + kind: WorkflowTemplate + listKind: WorkflowTemplateList + plural: workflowtemplates + shortNames: + - wftmpl + singular: workflowtemplate + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: 
argo-workflows-argo-events-controller-manager + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - argoproj.io + resources: + - sensors + - sensors/finalizers + - sensors/status + - eventsources + - eventsources/finalizers + - eventsources/status + - eventbus + - eventbus/finalizers + - eventbus/status + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + - pods/exec + - configmaps + - services + - persistentvolumeclaims + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - update + - patch + - delete +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-webhook/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-events-webhook + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - 
deployments + verbs: + - get + - list +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - argoproj.io + resources: + - eventbus + - eventsources + - sensors + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - get + - list +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-aggregate-roles.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-workflows-view + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + - workflowartifactgctasks + - workflowartifactgctasks/finalizers + verbs: + - get + - list + - watch +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-aggregate-roles.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-workflows-edit + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/part-of: argo-workflows + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + - workflowartifactgctasks + - workflowartifactgctasks/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-aggregate-roles.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-workflows-admin + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + - workflowartifactgctasks + - workflowartifactgctasks/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-cluster-roles.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole
metadata:
  name: argo-workflows-workflow-controller
  labels:
    helm.sh/chart: argo-workflows-0.45.28
    app.kubernetes.io/name: argo-workflows-workflow-controller
    app.kubernetes.io/instance: argo-workflows
    app.kubernetes.io/component: workflow-controller
    app: workflow-controller
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: argo-workflows
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - create
  - get
  - list
  - watch
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - pods/exec
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - configmaps
  - namespaces
  verbs:
  - get
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  - persistentvolumeclaims/finalizers
  verbs:
  - create
  - update
  - delete
  - get
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  - workflows/finalizers
  - workflowtasksets
  - workflowtasksets/finalizers
  - workflowtasksets/status
  - workflowartifactgctasks
  verbs:
  - get
  - list
  - watch
  - update
  - patch
  - delete
  - create
- apiGroups:
  - argoproj.io
  resources:
  - workflowtemplates
  - workflowtemplates/finalizers
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - argoproj.io
  resources:
  - workflowtaskresults
  - workflowtaskresults/finalizers
  verbs:
  - list
  - watch
  - deletecollection
- apiGroups:
  - argoproj.io
  resources:
  - cronworkflows
  - cronworkflows/finalizers
  verbs:
  - get
  - list
  - watch
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - get
  - list
- apiGroups:
  - "policy"
  resources:
  - poddisruptionbudgets
  verbs:
  - create
  - get
  - delete
- apiGroups:
  - ""
  resources:
  - secrets
  resourceNames:
  # NOTE(review): the chart emitted this secret name twice; deduplicated here.
  # Duplicates are harmless to the RBAC authorizer but indicate a template bug
  # (the same value substituted into two slots) — confirm upstream values.
  - argo-workflows-postgresql-17-cluster-app
  verbs:
  - get
- apiGroups:
  - 
coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - workflow-controller + - workflow-controller-lease + verbs: + - get + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + resourceNames: + - argo-workflows-agent-ca-certificates +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-cluster-roles.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-workflows-workflow-controller-cluster-template + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +rules: +- apiGroups: + - argoproj.io + resources: + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +--- +# Source: argo-workflows/charts/argo-workflows/templates/server/server-cluster-roles.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-workflows-server + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +rules: +- apiGroups: + - "" + resources: + - configmaps + - events + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list +- apiGroups: + - "" + resources: + - secrets + resourceNames: + - sso + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - create +- 
apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - watch
  - create
  - patch
- apiGroups:
  - ""
  resources:
  - secrets
  resourceNames:
  # NOTE(review): the chart emitted this secret name twice; deduplicated here.
  # Duplicates are harmless to the RBAC authorizer but indicate a template bug
  # (the same value substituted into two slots) — confirm upstream values.
  - argo-workflows-postgresql-17-cluster-app
  verbs:
  - get
- apiGroups:
  - argoproj.io
  resources:
  - eventsources
  - sensors
  - workflows
  - workfloweventbindings
  - workflowtemplates
  - cronworkflows
  verbs:
  - create
  - get
  - list
  - watch
  - update
  - patch
  - delete
---
# Source: argo-workflows/charts/argo-workflows/templates/server/server-cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: argo-workflows-server-cluster-template
  labels:
    helm.sh/chart: argo-workflows-0.45.28
    app.kubernetes.io/name: argo-workflows-server
    app.kubernetes.io/instance: argo-workflows
    app.kubernetes.io/component: server
    app: server
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: argo-workflows
rules:
- apiGroups:
  - argoproj.io
  resources:
  - clusterworkflowtemplates
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
---
# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: argo-workflows-argo-events-controller-manager
  labels:
    helm.sh/chart: argo-events-2.4.17
    app.kubernetes.io/name: argo-events-controller-manager
    app.kubernetes.io/instance: argo-workflows
    app.kubernetes.io/component: controller-manager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: argo-events
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argo-workflows-argo-events-controller-manager
subjects:
- kind: ServiceAccount
  name: argo-workflows-argo-events-controller-manager
  namespace: "argo-workflows"
---
# Source: 
argo-workflows/charts/argo-events/templates/argo-events-webhook/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflows-argo-events-events-webhook + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-events-webhook +subjects: +- kind: ServiceAccount + name: argo-workflows-argo-events-events-webhook + namespace: "argo-workflows" +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-crb.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflows-workflow-controller + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-workflows-workflow-controller +subjects: + - kind: ServiceAccount + name: argo-workflows-workflow-controller + namespace: "argo-workflows" +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-crb.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflows-workflow-controller-cluster-template + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: argo-workflows-workflow-controller-cluster-template +subjects: + - kind: ServiceAccount + name: argo-workflows-workflow-controller + namespace: "argo-workflows" +--- +# Source: argo-workflows/charts/argo-workflows/templates/server/server-crb.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflows-server + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-workflows-server +subjects: +- kind: ServiceAccount + name: argo-workflows-server + namespace: "argo-workflows" +--- +# Source: argo-workflows/charts/argo-workflows/templates/server/server-crb.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflows-server-cluster-template + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-workflows-server-cluster-template +subjects: +- kind: ServiceAccount + name: argo-workflows-server + namespace: "argo-workflows" +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-workflows-workflow + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: 
workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + namespace: argocd +rules: + - apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - create + - patch +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-workflows-workflow + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + namespace: argo-workflows +rules: + - apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - create + - patch +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-rb.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-workflows-workflow + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + namespace: argocd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-workflows-workflow +subjects: + - kind: ServiceAccount + name: argo-workflow + namespace: argocd +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-rb.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-workflows-workflow + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + namespace: argo-workflows +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-workflows-workflow +subjects: + - kind: ServiceAccount + name: argo-workflow + namespace: argo-workflows +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: argo-workflows-argo-events-controller-manager-metrics + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager-metrics + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +spec: + ports: + - name: metrics + protocol: TCP + port: 8082 + targetPort: metrics + selector: + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-webhook/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: events-webhook + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +spec: + ports: + - port: 443 + targetPort: webhook + selector: + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: argo-workflows-workflow-controller + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller 
+ app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "v3.7.4" +spec: + ports: + - name: metrics + port: 8080 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + sessionAffinity: None + type: ClusterIP +--- +# Source: argo-workflows/charts/argo-workflows/templates/server/server-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: argo-workflows-server + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "v3.7.4" +spec: + ports: + - port: 2746 + targetPort: 2746 + selector: + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + sessionAffinity: None + type: ClusterIP +--- +# Source: argo-workflows/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: garage-ps10rp + namespace: argo-workflows + labels: + app.kubernetes.io/name: garage-ps10rp + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + annotations: + tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-workflows-argo-events-controller-manager + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/part-of: argo-events + app.kubernetes.io/version: "v1.9.8" +spec: + selector: + matchLabels: + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + revisionHistoryLimit: 5 + replicas: 1 + template: + metadata: + annotations: + checksum/config: f6a1927c244b342165a873cdc9d662816fc3abe8ddd10d2cb5f6f6aa3b5553f0 + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events + app.kubernetes.io/version: "v1.9.8" + spec: + containers: + - name: controller-manager + image: quay.io/argoproj/argo-events:v1.9.8 + imagePullPolicy: IfNotPresent + args: + - controller + env: + - name: ARGO_EVENTS_IMAGE + value: quay.io/argoproj/argo-events:v1.9.8 + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config + mountPath: /etc/argo-events + ports: + - name: metrics + containerPort: 7777 + protocol: TCP + - name: probe + containerPort: 8081 + protocol: TCP + livenessProbe: + httpGet: + port: probe + path: /healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + port: probe + path: /readyz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + resources: + requests: + cpu: 10m + memory: 128Mi + serviceAccountName: argo-workflows-argo-events-controller-manager + volumes: + - name: config + configMap: + name: argo-workflows-argo-events-controller-manager +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-webhook/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: events-webhook + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + 
app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: events-webhook + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events + app.kubernetes.io/version: "v1.9.8" +spec: + selector: + matchLabels: + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + revisionHistoryLimit: 5 + replicas: 1 + template: + metadata: + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-events-webhook + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: events-webhook + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events + app.kubernetes.io/version: "v1.9.8" + spec: + containers: + - name: events-webhook + image: quay.io/argoproj/argo-events:v1.9.8 + imagePullPolicy: IfNotPresent + args: + - webhook-service + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PORT + value: "443" + ports: + - name: webhook + containerPort: 443 + protocol: TCP + livenessProbe: + tcpSocket: + port: webhook + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + tcpSocket: + port: webhook + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + resources: + requests: + cpu: 10m + memory: 128Mi + serviceAccountName: argo-workflows-argo-events-events-webhook +--- +# Source: argo-workflows/charts/argo-workflows/templates/controller/workflow-controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-workflows-workflow-controller + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "v3.7.4" +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + template: + metadata: + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-workflow-controller + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: workflow-controller + app: workflow-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "v3.7.4" + spec: + serviceAccountName: argo-workflows-workflow-controller + containers: + - name: controller + image: "quay.io/argoproj/workflow-controller:v3.7.4" + imagePullPolicy: Always + command: [ "workflow-controller" ] + args: + - "--configmap" + - "argo-workflows-workflow-controller-configmap" + - "--executor-image" + - "quay.io/argoproj/argoexec:v3.7.4" + - "--loglevel" + - "info" + - "--gloglevel" + - "0" + - "--log-format" + - "text" + - "--workflow-workers" + - "2" + - "--workflow-ttl-workers" + - "1" + - "--pod-cleanup-workers" + - "1" + - "--cron-workflow-workers" + - "1" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + env: + - name: ARGO_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: LEADER_ELECTION_DISABLE + value: "true" + resources: + requests: + cpu: 10m + memory: 128Mi + ports: + - name: metrics + containerPort: 9090 + - containerPort: 6060 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + nodeSelector: + kubernetes.io/os: linux +--- +# Source: 
argo-workflows/charts/argo-workflows/templates/server/server-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-workflows-server + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "v3.7.4" +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + template: + metadata: + labels: + helm.sh/chart: argo-workflows-0.45.28 + app.kubernetes.io/name: argo-workflows-server + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: server + app: server + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "v3.7.4" + annotations: + checksum/cm: e8ff26f31dc8c0bcbe056323d4a2f665788a48ff7a9c14c1ab61985933fa085e + spec: + serviceAccountName: argo-workflows-server + containers: + - name: argo-server + image: "quay.io/argoproj/argocli:v3.7.4" + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + args: + - server + - --configmap=argo-workflows-workflow-controller-configmap + - "--auth-mode=sso" + - "--secure=false" + - "--loglevel" + - "info" + - "--gloglevel" + - "0" + - "--log-format" + - "text" + ports: + - name: web + containerPort: 2746 + readinessProbe: + httpGet: + path: / + port: 2746 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 20 + env: + - name: IN_CLUSTER + value: "true" + - name: ARGO_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: ARGO_BASE_HREF + value: "/" + resources: + {} + volumeMounts: + - name: tmp + 
mountPath: /tmp + terminationGracePeriodSeconds: 30 + volumes: + - name: tmp + emptyDir: {} + nodeSelector: + kubernetes.io/os: linux +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: argo-workflows-postgresql-17-cluster + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "argo-workflows-postgresql-17-external-backup" + serverName: "argo-workflows-postgresql-17-backup-1" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "argo-workflows-postgresql-17-garage-local-backup" + serverName: "argo-workflows-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "argo-workflows-postgresql-17-recovery" + serverName: argo-workflows-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + 
enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: argo-workflows-postgresql-17-backup-1 + + externalClusters: + - name: argo-workflows-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "argo-workflows-postgresql-17-recovery" + serverName: argo-workflows-postgresql-17-backup-1 +--- +# Source: argo-workflows/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: argo-workflows-oidc-secret + namespace: argo-workflows + labels: + app.kubernetes.io/name: argo-workflows-oidc-secret + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/argo-workflows + metadataPolicy: None + property: secret + - secretKey: client + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/argo-workflows + metadataPolicy: None + property: client +--- +# Source: argo-workflows/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: argo-workflows-postgresql-17-cluster-backup-secret + namespace: argo-workflows + labels: + app.kubernetes.io/name: argo-workflows-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: 
/digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: argo-workflows/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: argo-workflows-postgresql-17-cluster-backup-secret-garage + namespace: argo-workflows + labels: + app.kubernetes.io/name: argo-workflows-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: argo-workflows/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-argo-workflows + namespace: argo-workflows + labels: + app.kubernetes.io/name: http-route-argo-workflows + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - argo-workflows.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: argo-workflows-server + port: 2746 + weight: 100 +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: 
"argo-workflows-postgresql-17-external-backup" + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: argo-workflows-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: argo-workflows-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "argo-workflows-postgresql-17-garage-local-backup" + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: argo-workflows-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: argo-workflows-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: argo-workflows-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: 
"argo-workflows-postgresql-17-recovery" + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: argo-workflows-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: argo-workflows-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: argo-workflows-postgresql-17-alert-rules + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/argo-workflows-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster a backend is waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. 
+ description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less + instances. The replaced instance may need some time to catch-up with the cluster primary instance. + + This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="argo-workflows"} - cnpg_pg_replication_is_wal_receiver_up{namespace="argo-workflows"}) < 1 + for: 5m + labels: + severity: critical + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster less than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. 
+ + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch-up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="argo-workflows"} - cnpg_pg_replication_is_wal_receiver_up{namespace="argo-workflows"}) < 2 + for: 5m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="argo-workflows", pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="argo-workflows", pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="argo-workflows", pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="argo-workflows", pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="argo-workflows",pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="argo-workflows", pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="argo-workflows", persistentvolumeclaim=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="argo-workflows",pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster instances in the same zone. + description: |- + CloudNativePG Cluster "argo-workflows/argo-workflows-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="argo-workflows", pod=~"argo-workflows-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: argo-workflows + cnpg_cluster: argo-workflows-postgresql-17-cluster +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "argo-workflows-postgresql-17-daily-backup-scheduled-backup" + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: argo-workflows-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "argo-workflows-postgresql-17-external-backup" +--- +# Source: argo-workflows/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "argo-workflows-postgresql-17-live-backup-scheduled-backup" + namespace: argo-workflows + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: argo-workflows-postgresql-17 + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/part-of: argo-workflows + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: 
argo-workflows-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "argo-workflows-postgresql-17-garage-local-backup" +--- +# Source: argo-workflows/charts/argo-events/templates/argo-events-controller/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: argo-workflows-argo-events-controller-manager + namespace: "argo-workflows" + labels: + helm.sh/chart: argo-events-2.4.17 + app.kubernetes.io/name: argo-events-controller-manager + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: argo-events +spec: + endpoints: + - port: metrics + interval: 30s + path: /metrics + namespaceSelector: + matchNames: + - "argo-workflows" + selector: + matchLabels: + app.kubernetes.io/name: argo-events-controller-manager-metrics + app.kubernetes.io/instance: argo-workflows + app.kubernetes.io/component: controller-manager diff --git a/clusters/cl01tl/manifests/gatus/gatus.yaml b/clusters/cl01tl/manifests/gatus/gatus.yaml new file mode 100644 index 000000000..626653789 --- /dev/null +++ b/clusters/cl01tl/manifests/gatus/gatus.yaml @@ -0,0 +1,1527 @@ +--- +# Source: gatus/charts/gatus/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: gatus + namespace: gatus + labels: + helm.sh/chart: gatus-1.4.4 + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + app.kubernetes.io/version: "v5.33.0" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + alerting: + ntfy: + click: https://gatus.alexlebens.net + default-alert: + failure-threshold: 5 + send-on-resolved: true + priority: 3 + token: ${NTFY_TOKEN} + topic: gatus-alerts + url: http://ntfy.ntfy + connectivity: + checker: + interval: 60s + target: 1.1.1.1:53 + default-endpoint: + alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - 
'[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + endpoints: + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 401' + group: core + interval: 30s + name: plex + url: http://plex.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: jellyfin + url: https://jellyfin.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: overseerr + url: https://overseerr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: yamtrack + url: https://yamtrack.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: tubearchivist + url: https://tubearchivist.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: immich + url: https://immich.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: photoview + url: https://photoview.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: audiobookshelf + url: https://audiobookshelf.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: home-assistant + url: https://home-assistant.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: actual + url: https://actual.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - 
'[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: ollama + url: https://ollama.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: searxng + url: https://searxng.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: roundcube + url: https://mail.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: kiwix + url: https://kiwix.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: gitea + url: https://gitea.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: home-assistant-code-server + url: https://home-assistant-code-server.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: argocd + url: https://argocd.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: komodo + url: https://komodo.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: argo-workflows + url: https://argo-workflows.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: n8n + url: https://n8n.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: omni-tools + url: 
https://omni-tools.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: headlamp + url: https://headlamp.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: hubble + url: https://hubble.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: grafana + url: https://grafana.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: prometheus + url: https://prometheus.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: alertmanager + url: https://alertmanager.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: tautulli + url: https://tautulli.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: jellystat + url: https://jellystat.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: authentik + url: https://authentik.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: stalwart + url: https://stalwart.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: ntfy + url: https://ntfy.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] 
> 240h' + group: core + interval: 30s + name: traefik-cl01tl + url: https://traefik-cl01tl.alexlebens.net/dashboard/#/ + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: harbor + url: https://harbor.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: unifi + url: https://unifi.alexlebens.net + - alerts: + - type: ntfy + client: + insecure: true + conditions: + - '[CONNECTED] == true' + group: core + interval: 30s + name: synology + url: https://synology.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + group: core + interval: 30s + name: hdhr + url: http://hdhr.alexlebens.net + - alerts: + - type: ntfy + client: + insecure: true + conditions: + - '[CONNECTED] == true' + group: core + interval: 30s + name: pikvm + url: https://pikvm.alexlebens.net/login/ + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + group: core + interval: 30s + name: shelly + url: http://it05sp.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: ceph + url: https://ceph.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: pgadmin + url: https://pgadmin.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: whodb + url: https://whodb.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: vault + url: https://vault.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: sonarr 
+ url: https://sonarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: sonarr-4k + url: https://sonarr-4k.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: sonarr-anime + url: https://sonarr-anime.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: radarr + url: https://radarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: radarr-4k + url: https://radarr-4k.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: radarr-anime + url: https://radarr-anime.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: radarr-standup + url: https://radarr-standup.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: lidarr + url: https://lidarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: lidatube + url: https://lidatube.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: slskd + url: https://slskd.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: qui + url: https://qui.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - 
'[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: qbittorrent + url: https://qbittorrent.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: prowlarr + url: https://prowlarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 401' + group: core + interval: 30s + name: bazarr + url: https://bazarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: huntarr + url: https://huntarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: core + interval: 30s + name: tdarr + url: https://tdarr.alexlebens.net + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: www + url: https://www.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: directus + url: https://directus.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 120s + name: postiz + url: https://postiz.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: matrix + url: https://chat.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: outline + url: https://wiki.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: vaultwarden + url: https://passwords.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - 
'[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: karakeep + url: https://karakeep.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 401' + group: external + interval: 30s + name: freshrss + url: https://rss.alexlebens.dev/i/ + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: gitea-external + url: https://gitea.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: codeserver + url: https://codeserver.alexlebens.dev + - alerts: + - type: ntfy + conditions: + - '[STATUS] == 200' + - '[CERTIFICATE_EXPIRATION] > 240h' + group: external + interval: 30s + name: public homepage + url: https://home.alexlebens.dev + - conditions: + - '[STATUS] == 200' + - '[RESPONSE_TIME] < 400' + group: public + interval: 10s + name: discord + url: https://discord.com/app + - conditions: + - '[STATUS] == 200' + - '[RESPONSE_TIME] < 400' + group: public + interval: 10s + name: reddit + url: https://reddit.com + metrics: true + security: + oidc: + client-id: ${OIDC_CLIENT_ID} + client-secret: ${OIDC_CLIENT_SECRET} + issuer-url: https://authentik.alexlebens.net/application/o/gatus/ + redirect-url: https://gatus.alexlebens.net/authorization-code/callback + scopes: + - openid + storage: + path: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}?sslmode=disable + type: postgres +--- +# Source: gatus/charts/gatus/templates/pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: gatus + namespace: gatus + labels: + helm.sh/chart: gatus-1.4.4 + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + app.kubernetes.io/version: "v5.33.0" + app.kubernetes.io/managed-by: Helm + finalizers: + - kubernetes.io/pvc-protection +spec: + accessModes: + - "ReadWriteOnce" + 
resources: + requests: + storage: "1Gi" + storageClassName: ceph-block +--- +# Source: gatus/charts/gatus/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: gatus + namespace: gatus + labels: + helm.sh/chart: gatus-1.4.4 + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + app.kubernetes.io/version: "v5.33.0" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: http + port: 80 + targetPort: http + protocol: TCP + selector: + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus +--- +# Source: gatus/charts/gatus/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gatus + namespace: gatus + labels: + helm.sh/chart: gatus-1.4.4 + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + app.kubernetes.io/version: "v5.33.0" + app.kubernetes.io/managed-by: Helm + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + annotations: + checksum/config: 31c72cc890f0e181e18d8c8f38bec005f1ea27d18aeaebc241351d1e3d5dd21d + spec: + + serviceAccountName: default + automountServiceAccountToken: false + securityContext: + fsGroup: 65534 + containers: + - name: gatus + securityContext: + readOnlyRootFilesystem: true + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + image: "ghcr.io/twin/gatus:v5.33.0" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + protocol: TCP + env: + - name: "NTFY_TOKEN" + valueFrom: + secretKeyRef: + key: NTFY_TOKEN + name: gatus-config-secret + - name: "OIDC_CLIENT_ID" + valueFrom: + secretKeyRef: + key: OIDC_CLIENT_ID + name: gatus-oidc-secret + - name: "OIDC_CLIENT_SECRET" + valueFrom: + secretKeyRef: + key: OIDC_CLIENT_SECRET + 
name: gatus-oidc-secret + - name: "POSTGRES_DB" + valueFrom: + secretKeyRef: + key: dbname + name: gatus-postgresql-17-cluster-app + - name: "POSTGRES_HOST" + valueFrom: + secretKeyRef: + key: host + name: gatus-postgresql-17-cluster-app + - name: "POSTGRES_PASSWORD" + valueFrom: + secretKeyRef: + key: password + name: gatus-postgresql-17-cluster-app + - name: "POSTGRES_PORT" + valueFrom: + secretKeyRef: + key: port + name: gatus-postgresql-17-cluster-app + - name: "POSTGRES_USER" + valueFrom: + secretKeyRef: + key: username + name: gatus-postgresql-17-cluster-app + envFrom: + - configMapRef: + name: gatus + readinessProbe: + httpGet: + path: /health + port: http + livenessProbe: + httpGet: + path: /health + port: http + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - name: gatus-config + mountPath: /config + readOnly: true + - name: gatus-data + mountPath: /data + volumes: + - name: gatus-config + configMap: + name: gatus + - name: gatus-data + persistentVolumeClaim: + claimName: gatus +--- +# Source: gatus/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: gatus-postgresql-17-cluster + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "gatus-postgresql-17-external-backup" + serverName: "gatus-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "gatus-postgresql-17-garage-local-backup" + serverName: 
"gatus-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "gatus-postgresql-17-recovery" + serverName: gatus-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: gatus-postgresql-17-backup-1 + + externalClusters: + - name: gatus-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "gatus-postgresql-17-recovery" + serverName: gatus-postgresql-17-backup-1 +--- +# Source: gatus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: gatus-config-secret + namespace: gatus + labels: + app.kubernetes.io/name: gatus-config-secret + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: NTFY_TOKEN + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /ntfy/user/cl01tl + metadataPolicy: None + property: token +--- +# Source: gatus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: gatus-oidc-secret + namespace: gatus + labels: + app.kubernetes.io/name: gatus-oidc-secret + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus 
+spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: OIDC_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/gatus + metadataPolicy: None + property: client + - secretKey: OIDC_CLIENT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/gatus + metadataPolicy: None + property: secret +--- +# Source: gatus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: gatus-postgresql-17-cluster-backup-secret + namespace: gatus + labels: + app.kubernetes.io/name: gatus-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: gatus/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: gatus-postgresql-17-cluster-backup-secret-garage + namespace: gatus + labels: + app.kubernetes.io/name: gatus-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + 
property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: gatus/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-gatus + namespace: gatus + labels: + app.kubernetes.io/name: http-route-gatus + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - gatus.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: gatus + port: 80 + weight: 100 +--- +# Source: gatus/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "gatus-postgresql-17-external-backup" + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/gatus/gatus-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: gatus-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: gatus-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: gatus/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "gatus-postgresql-17-garage-local-backup" + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + 
app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/gatus/gatus-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: gatus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: gatus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: gatus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: gatus/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "gatus-postgresql-17-recovery" + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/gatus/gatus-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: gatus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: gatus-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: gatus/charts/postgres-17-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: gatus-postgresql-17-alert-rules + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: 
cloudnative-pg/gatus-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the main instance. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less + instances. The replaced instance may need some time to catch-up with the cluster primary instance. + + This alarm will always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="gatus"} - cnpg_pg_replication_is_wal_receiver_up{namespace="gatus"}) < 1 + for: 5m + labels: + severity: critical + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster less than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch-up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="gatus"} - cnpg_pg_replication_is_wal_receiver_up{namespace="gatus"}) < 2 + for: 5m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="gatus", pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="gatus", pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="gatus", pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="gatus", pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="gatus",pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="gatus", pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="gatus", persistentvolumeclaim=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="gatus",pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica that is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster instances in the same zone. + description: |- + CloudNativePG Cluster "gatus/gatus-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="gatus", pod=~"gatus-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: gatus + cnpg_cluster: gatus-postgresql-17-cluster +--- +# Source: gatus/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "gatus-postgresql-17-daily-backup-scheduled-backup" + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: gatus-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "gatus-postgresql-17-external-backup" +--- +# Source: gatus/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "gatus-postgresql-17-live-backup-scheduled-backup" + namespace: gatus + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: gatus-postgresql-17 + app.kubernetes.io/instance: gatus + app.kubernetes.io/part-of: gatus + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: gatus-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: 
"gatus-postgresql-17-garage-local-backup" +--- +# Source: gatus/charts/gatus/templates/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: gatus + namespace: gatus + labels: + helm.sh/chart: gatus-1.4.4 + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + app.kubernetes.io/version: "v5.33.0" + app.kubernetes.io/managed-by: Helm +spec: + endpoints: + - port: http + interval: 1m + scrapeTimeout: 30s + honorLabels: true + path: /metrics + scheme: http + jobLabel: "gatus" + selector: + matchLabels: + app.kubernetes.io/name: gatus + app.kubernetes.io/instance: gatus + namespaceSelector: + matchNames: + - gatus diff --git a/clusters/cl01tl/manifests/grafana-operator/grafana-operator.yaml b/clusters/cl01tl/manifests/grafana-operator/grafana-operator.yaml new file mode 100644 index 000000000..fdddb5278 --- /dev/null +++ b/clusters/cl01tl/manifests/grafana-operator/grafana-operator.yaml @@ -0,0 +1,10862 @@ +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanaalertrulegroups.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanaalertrulegroups.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaAlertRuleGroup + listKind: GrafanaAlertRuleGroupList + plural: grafanaalertrulegroups + singular: grafanaalertrulegroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaAlertRuleGroup is the Schema for the grafanaalertrulegroups + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaAlertRuleGroupSpec defines the desired state of GrafanaAlertRuleGroup + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + editable: + description: Whether to enable or disable editing of the alert rule + group in Grafana UI + type: boolean + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + folderRef: + description: Match GrafanaFolders CRs to infer the uid + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + folderUID: + description: |- + UID of the folder containing this rule group + Overrides the FolderSelector + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + interval: + format: duration + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + name: + description: Name of the alert rule group. If not specified, the resource + name will be used. + type: string + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + rules: + items: + description: AlertRule defines a specific rule to be evaluated. + It is based on the upstream model with some k8s specific type + mappings + properties: + annotations: + additionalProperties: + type: string + type: object + condition: + type: string + data: + items: + properties: + datasourceUid: + description: Grafana data source unique identifier; it + should be '__expr__' for a Server Side Expression operation. 
+ type: string + model: + description: JSON is the raw JSON query and includes the + above properties as well as custom properties. + x-kubernetes-preserve-unknown-fields: true + queryType: + description: |- + QueryType is an optional identifier for the type of query. + It can be used to distinguish different types of queries. + type: string + refId: + description: RefID is the unique identifier of the query, + set by the frontend call. + type: string + relativeTimeRange: + description: relative time range + properties: + from: + description: from + format: int64 + type: integer + to: + description: to + format: int64 + type: integer + type: object + type: object + type: array + execErrState: + enum: + - OK + - Alerting + - Error + - KeepLast + type: string + for: + default: 0s + format: duration + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + isPaused: + type: boolean + keepFiringFor: + format: duration + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + labels: + additionalProperties: + type: string + type: object + missingSeriesEvalsToResolve: + description: The number of missing series evaluations that must + occur before the rule is considered to be resolved. + format: int64 + type: integer + noDataState: + enum: + - Alerting + - NoData + - OK + - KeepLast + type: string + notificationSettings: + properties: + group_by: + items: + type: string + type: array + group_interval: + type: string + group_wait: + type: string + mute_time_intervals: + items: + type: string + type: array + receiver: + type: string + repeat_interval: + type: string + required: + - receiver + type: object + record: + properties: + from: + type: string + metric: + type: string + required: + - from + - metric + type: object + title: + example: Always firing + maxLength: 190 + minLength: 1 + type: string + uid: + description: UID of the alert rule. 
Can be any string consisting + of alphanumeric characters, - and _ with a maximum length + of 40 + maxLength: 40 + pattern: ^[a-zA-Z0-9-_]+$ + type: string + required: + - condition + - data + - execErrState + - for + - noDataState + - title + - uid + type: object + minItems: 1 + type: array + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + required: + - instanceSelector + - interval + - rules + type: object + x-kubernetes-validations: + - message: Only one of FolderUID or FolderRef can be set and one must + be defined + rule: (has(self.folderUID) && !(has(self.folderRef))) || (has(self.folderRef) + && !(has(self.folderUID))) + - message: spec.editable is immutable + rule: ((!has(oldSelf.editable) && !has(self.editable)) || (has(oldSelf.editable) + && has(self.editable))) + - message: spec.folderUID is immutable + rule: ((!has(oldSelf.folderUID) && !has(self.folderUID)) || (has(oldSelf.folderUID) + && has(self.folderUID))) + - message: spec.folderRef is immutable + rule: ((!has(oldSelf.folderRef) && !has(self.folderRef)) || (has(oldSelf.folderRef) + && has(self.folderRef))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: The most recent observed state of a Grafana resource + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanacontactpoints.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanacontactpoints.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaContactPoint + listKind: GrafanaContactPointList + plural: grafanacontactpoints + singular: grafanacontactpoint + scope: Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaContactPoint is the Schema for the grafanacontactpoints + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaContactPointSpec defines the desired state of GrafanaContactPoint + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + disableResolveMessage: + type: boolean + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + name: + type: string + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + settings: + x-kubernetes-preserve-unknown-fields: true + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + type: + minLength: 1 + type: string + uid: + description: Manually specify the UID the Contact Point is created + with. Can be any string consisting of alphanumeric characters, - + and _ with a maximum length of 40 + maxLength: 40 + pattern: ^[a-zA-Z0-9-_]+$ + type: string + x-kubernetes-validations: + - message: spec.uid is immutable + rule: self == oldSelf + valuesFrom: + items: + properties: + targetPath: + type: string + valueFrom: + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: Either configMapKeyRef or secretKeyRef must be set + rule: (has(self.configMapKeyRef) && !has(self.secretKeyRef)) + || (!has(self.configMapKeyRef) && has(self.secretKeyRef)) + required: + - targetPath + - valueFrom + type: object + maxItems: 99 + type: array + required: + - instanceSelector + - name + - settings + - type + type: object + x-kubernetes-validations: + - message: spec.uid is immutable + rule: ((!has(oldSelf.uid) && !has(self.uid)) || (has(oldSelf.uid) && + has(self.uid))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: The most recent observed state of a Grafana resource + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanadashboards.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanadashboards.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaDashboard + listKind: GrafanaDashboardList + plural: grafanadashboards + singular: grafanadashboard + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.NoMatchingInstances + name: No matching instances + type: boolean + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaDashboard is the Schema for the grafanadashboards API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaDashboardSpec defines the desired state of GrafanaDashboard + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + configMapRef: + description: model from configmap + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + contentCacheDuration: + description: Cache duration for models fetched from URLs + type: string + datasources: + description: maps required data sources to existing ones + items: + description: |- + GrafanaResourceDatasource is used to set the datasource name of any templated datasources in + content definitions (e.g., dashboard JSON). + properties: + datasourceName: + type: string + inputName: + type: string + required: + - datasourceName + - inputName + type: object + type: array + envFrom: + description: environments variables from secrets or config maps + items: + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envs: + description: environments variables as a map + items: + properties: + name: + type: string + value: + description: Inline env value + type: string + valueFrom: + description: Reference on value source, might be the reference + on a secret or config map + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + folder: + description: folder assignment for dashboard + type: string + folderRef: + description: Name of a `GrafanaFolder` resource in the same namespace + type: string + folderUID: + description: UID of the target folder for this dashboard + type: string + grafanaCom: + description: grafana.com/dashboards + properties: + id: + type: integer + revision: + type: integer + required: + - id + type: object + gzipJson: + description: GzipJson the model's JSON compressed with Gzip. Base64-encoded + when in YAML. + format: byte + type: string + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + json: + description: model json + type: string + jsonnet: + description: Jsonnet + type: string + jsonnetLib: + description: Jsonnet project build + properties: + fileName: + type: string + gzipJsonnetProject: + format: byte + type: string + jPath: + items: + type: string + type: array + required: + - fileName + - gzipJsonnetProject + type: object + plugins: + description: plugins + items: + properties: + name: + minLength: 1 + type: string + version: + pattern: ^((0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?|latest)$ + type: string + required: + - name + - version + type: object + type: array + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + uid: + description: |- + Manually specify the uid, overwrites uids already present in the json model. + Can be any string consisting of alphanumeric characters, - and _ with a maximum length of 40. + maxLength: 40 + pattern: ^[a-zA-Z0-9-_]+$ + type: string + x-kubernetes-validations: + - message: spec.uid is immutable + rule: self == oldSelf + url: + description: model url + type: string + urlAuthorization: + description: authorization options for model from url + properties: + basicAuth: + properties: + password: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + required: + - instanceSelector + type: object + x-kubernetes-validations: + - message: Only one of folderUID or folderRef can be declared at the same + time + rule: (has(self.folderUID) && !(has(self.folderRef))) || (has(self.folderRef) + && !(has(self.folderUID))) || !(has(self.folderRef) && (has(self.folderUID))) + - message: folder field cannot be set when folderUID or folderRef is already + declared + rule: (has(self.folder) && !(has(self.folderRef) || has(self.folderUID))) + || !(has(self.folder)) + - message: spec.uid is immutable + rule: ((!has(oldSelf.uid) && !has(self.uid)) || (has(oldSelf.uid) && + has(self.uid))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || 
(oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: GrafanaDashboardStatus defines the observed state of GrafanaDashboard + properties: + NoMatchingInstances: + description: The dashboard instanceSelector can't find matching grafana + instances + type: boolean + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + contentCache: + format: byte + type: string + contentTimestamp: + format: date-time + type: string + contentUrl: + type: string + hash: + type: string + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + uid: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanadatasources.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanadatasources.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaDatasource + listKind: GrafanaDatasourceList + plural: grafanadatasources + singular: grafanadatasource + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.NoMatchingInstances + name: No matching instances + type: boolean + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaDatasource is the Schema for the grafanadatasources API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaDatasourceSpec defines the desired state of GrafanaDatasource + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + datasource: + properties: + access: + type: string + basicAuth: + type: boolean + basicAuthUser: + type: string + database: + type: string + editable: + description: Whether to enable/disable editing of the datasource + in Grafana UI + type: boolean + isDefault: + type: boolean + jsonData: + type: object + x-kubernetes-preserve-unknown-fields: true + name: + type: string + orgId: + description: Deprecated field, it has no effect + format: int64 + type: integer + secureJsonData: + type: object + x-kubernetes-preserve-unknown-fields: true + type: + type: string + uid: + description: Deprecated field, use spec.uid instead + type: string + url: + type: string + user: + type: string + type: object + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + plugins: + description: plugins + items: + properties: + name: + minLength: 1 + type: string + version: + pattern: ^((0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?|latest)$ + type: string + required: + - name + - version + type: object + type: array + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + uid: + description: |- + The UID, for the datasource, fallback to the deprecated spec.datasource.uid + and metadata.uid. Can be any string consisting of alphanumeric characters, + - and _ with a maximum length of 40 +optional + maxLength: 40 + pattern: ^[a-zA-Z0-9-_]+$ + type: string + x-kubernetes-validations: + - message: spec.uid is immutable + rule: self == oldSelf + valuesFrom: + description: environments variables from secrets or config maps + items: + properties: + targetPath: + type: string + valueFrom: + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: Either configMapKeyRef or secretKeyRef must be set + rule: (has(self.configMapKeyRef) && !has(self.secretKeyRef)) + || (!has(self.configMapKeyRef) && has(self.secretKeyRef)) + required: + - targetPath + - valueFrom + type: object + maxItems: 99 + type: array + required: + - datasource + - instanceSelector + type: object + x-kubernetes-validations: + - message: spec.uid is immutable + rule: ((!has(oldSelf.uid) && !has(self.uid)) || (has(oldSelf.uid) && + has(self.uid))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: GrafanaDatasourceStatus defines the observed state of GrafanaDatasource + properties: + NoMatchingInstances: + description: The datasource instanceSelector can't find matching grafana + instances + type: boolean + conditions: + description: Results when 
synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + hash: + type: string + lastMessage: + description: 'Deprecated: Check status.conditions or operator logs' + type: string + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + uid: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanafolders.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanafolders.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaFolder + listKind: GrafanaFolderList + plural: grafanafolders + singular: grafanafolder + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.NoMatchingInstances + name: No matching instances + type: boolean + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaFolder is the Schema for the grafanafolders API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaFolderSpec defines the desired state of GrafanaFolder + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + parentFolderRef: + description: Reference to an existing GrafanaFolder CR in the same + namespace + type: string + parentFolderUID: + description: UID of the folder in which the current folder should + be created + type: string + permissions: + description: Raw json with folder permissions, potentially exported + from Grafana + type: string + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + title: + description: Display name of the folder in Grafana + type: string + uid: + description: Manually specify the UID the Folder is created with. 
+ Can be any string consisting of alphanumeric characters, - and _ + with a maximum length of 40 + maxLength: 40 + pattern: ^[a-zA-Z0-9-_]+$ + type: string + x-kubernetes-validations: + - message: spec.uid is immutable + rule: self == oldSelf + required: + - instanceSelector + type: object + x-kubernetes-validations: + - message: Only one of parentFolderUID or parentFolderRef can be set + rule: (has(self.parentFolderUID) && !(has(self.parentFolderRef))) || + (has(self.parentFolderRef) && !(has(self.parentFolderUID))) || !(has(self.parentFolderRef) + && (has(self.parentFolderUID))) + - message: spec.uid is immutable + rule: ((!has(oldSelf.uid) && !has(self.uid)) || (has(oldSelf.uid) && + has(self.uid))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: GrafanaFolderStatus defines the observed state of GrafanaFolder + properties: + NoMatchingInstances: + description: The folder instanceSelector can't find matching grafana + instances + type: boolean + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + hash: + type: string + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanalibrarypanels.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanalibrarypanels.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaLibraryPanel + listKind: GrafanaLibraryPanelList + plural: grafanalibrarypanels + singular: grafanalibrarypanel + scope: 
Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaLibraryPanel is the Schema for the grafanalibrarypanels + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaLibraryPanelSpec defines the desired state of GrafanaLibraryPanel + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + configMapRef: + description: model from configmap + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + contentCacheDuration: + description: Cache duration for models fetched from URLs + type: string + datasources: + description: maps required data sources to existing ones + items: + description: |- + GrafanaResourceDatasource is used to set the datasource name of any templated datasources in + content definitions (e.g., dashboard JSON). + properties: + datasourceName: + type: string + inputName: + type: string + required: + - datasourceName + - inputName + type: object + type: array + envFrom: + description: environments variables from secrets or config maps + items: + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envs: + description: environments variables as a map + items: + properties: + name: + type: string + value: + description: Inline env value + type: string + valueFrom: + description: Reference on value source, might be the reference + on a secret or config map + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + folderRef: + description: Name of a `GrafanaFolder` resource in the same namespace + type: string + folderUID: + description: UID of the target folder for this dashboard + type: string + grafanaCom: + description: grafana.com/dashboards + properties: + id: + type: integer + revision: + type: integer + required: + - id + type: object + gzipJson: + description: GzipJson the model's JSON compressed with Gzip. Base64-encoded + when in YAML. + format: byte + type: string + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + json: + description: model json + type: string + jsonnet: + description: Jsonnet + type: string + jsonnetLib: + description: Jsonnet project build + properties: + fileName: + type: string + gzipJsonnetProject: + format: byte + type: string + jPath: + items: + type: string + type: array + required: + - fileName + - gzipJsonnetProject + type: object + plugins: + description: plugins + items: + properties: + name: + minLength: 1 + type: string + version: + pattern: ^((0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?|latest)$ + type: string + required: + - name + - version + type: object + type: array + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + uid: + description: |- + Manually specify the uid, overwrites uids already present in the json model. + Can be any string consisting of alphanumeric characters, - and _ with a maximum length of 40. + maxLength: 40 + pattern: ^[a-zA-Z0-9-_]+$ + type: string + x-kubernetes-validations: + - message: spec.uid is immutable + rule: self == oldSelf + url: + description: model url + type: string + urlAuthorization: + description: authorization options for model from url + properties: + basicAuth: + properties: + password: + description: SecretKeySelector selects a key of a Secret. 
+ properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + required: + - instanceSelector + type: object + x-kubernetes-validations: + - message: Only one of folderUID or folderRef can be declared at the same + time + rule: (has(self.folderUID) && !(has(self.folderRef))) || (has(self.folderRef) + && !(has(self.folderUID))) || !(has(self.folderRef) && (has(self.folderUID))) + - message: spec.uid is immutable + rule: ((!has(oldSelf.uid) && !has(self.uid)) || (has(oldSelf.uid) && + has(self.uid))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: GrafanaLibraryPanelStatus defines the observed state of GrafanaLibraryPanel + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + contentCache: + format: byte + type: string + contentTimestamp: + format: date-time + type: string + contentUrl: + type: string + hash: + type: string + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + uid: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanamutetimings.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanamutetimings.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - 
grafana-operator + kind: GrafanaMuteTiming + listKind: GrafanaMuteTimingList + plural: grafanamutetimings + singular: grafanamutetiming + scope: Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaMuteTiming is the Schema for the GrafanaMuteTiming API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaMuteTimingSpec defines the desired state of GrafanaMuteTiming + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + editable: + default: true + description: Whether to enable or disable editing of the mute timing + in Grafana UI + type: boolean + x-kubernetes-validations: + - message: spec.editable is immutable + rule: self == oldSelf + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + name: + description: A unique name for the mute timing + type: string + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + time_intervals: + description: Time intervals for muting + items: + properties: + days_of_month: + description: |- + The date 1-31 of a month. Negative values can also be used to represent days that begin at the end of the month. 
+ For example: -1 for the last day of the month. + items: + type: string + type: array + location: + description: Depending on the location, the time range is displayed + in local time. + type: string + months: + description: |- + The months of the year in either numerical or the full calendar month. + For example: 1, may. + items: + type: string + type: array + times: + description: The time inclusive of the start and exclusive of + the end time (in UTC if no location has been selected, otherwise + local time). + items: + properties: + end_time: + description: end time + type: string + start_time: + description: start time + type: string + required: + - end_time + - start_time + type: object + type: array + weekdays: + description: |- + The day or range of days of the week. + For example: monday, thursday + items: + type: string + type: array + years: + description: |- + The year or years for the interval. + For example: 2021 + items: + type: string + type: array + type: object + minItems: 1 + type: array + required: + - instanceSelector + - name + - time_intervals + type: object + x-kubernetes-validations: + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: The most recent observed state of a Grafana resource + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafananotificationpolicies.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafananotificationpolicies.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaNotificationPolicy + listKind: GrafanaNotificationPolicyList + plural: grafananotificationpolicies + singular: grafananotificationpolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaNotificationPolicy is the Schema for the GrafanaNotificationPolicy + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaNotificationPolicySpec defines the desired state of + GrafanaNotificationPolicy + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + editable: + description: Whether to enable or disable editing of the notification + policy in Grafana UI + type: boolean + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + route: + description: Routes for alerts to match against + properties: + continue: + description: continue + type: boolean + group_by: + description: group by + items: + type: string + type: array + group_interval: + description: group interval + type: string + group_wait: + description: group wait + type: string + match_re: + additionalProperties: + type: string + description: match re + type: object + matchers: + description: matchers + items: + properties: + isEqual: + description: is equal + type: boolean + isRegex: + description: is regex + type: boolean + name: + description: name + type: string + value: + description: value + type: string + required: + - isRegex + - value + type: object + type: array + mute_time_intervals: + description: mute time intervals + items: + type: string + type: array + object_matchers: + description: object matchers + items: + description: |- + ObjectMatcher ObjectMatcher is a matcher that can be used to filter alerts. 
+ + swagger:model ObjectMatcher + items: + type: string + type: array + type: array + provenance: + description: provenance + type: string + receiver: + description: receiver + minLength: 1 + type: string + repeat_interval: + description: repeat interval + type: string + routeSelector: + description: |- + selects GrafanaNotificationPolicyRoutes to merge in when specified + mutually exclusive with Routes + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + routes: + description: routes, mutually exclusive with RouteSelector + x-kubernetes-preserve-unknown-fields: true + required: + - receiver + type: object + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + required: + - instanceSelector + - route + type: object + x-kubernetes-validations: + - message: spec.editable is immutable + rule: ((!has(oldSelf.editable) && !has(self.editable)) || (has(oldSelf.editable) + && has(self.editable))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: GrafanaNotificationPolicyStatus defines the observed state + of GrafanaNotificationPolicy + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + discoveredRoutes: + items: + type: string + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafananotificationpolicyroutes.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafananotificationpolicyroutes.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaNotificationPolicyRoute + listKind: GrafanaNotificationPolicyRouteList + plural: grafananotificationpolicyroutes + singular: grafananotificationpolicyroute + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: 
GrafanaNotificationPolicyRoute is the Schema for the grafananotificationpolicyroutes + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaNotificationPolicyRouteSpec defines the desired state + of GrafanaNotificationPolicyRoute + properties: + continue: + description: continue + type: boolean + group_by: + description: group by + items: + type: string + type: array + group_interval: + description: group interval + type: string + group_wait: + description: group wait + type: string + match_re: + additionalProperties: + type: string + description: match re + type: object + matchers: + description: matchers + items: + properties: + isEqual: + description: is equal + type: boolean + isRegex: + description: is regex + type: boolean + name: + description: name + type: string + value: + description: value + type: string + required: + - isRegex + - value + type: object + type: array + mute_time_intervals: + description: mute time intervals + items: + type: string + type: array + object_matchers: + description: object matchers + items: + description: |- + ObjectMatcher ObjectMatcher is a matcher that can be used to filter alerts. 
+ + swagger:model ObjectMatcher + items: + type: string + type: array + type: array + provenance: + description: provenance + type: string + receiver: + description: receiver + minLength: 1 + type: string + repeat_interval: + description: repeat interval + type: string + routeSelector: + description: |- + selects GrafanaNotificationPolicyRoutes to merge in when specified + mutually exclusive with Routes + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + routes: + description: routes, mutually exclusive with RouteSelector + x-kubernetes-preserve-unknown-fields: true + required: + - receiver + type: object + status: + description: The most recent observed state of a Grafana resource + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafananotificationtemplates.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafananotificationtemplates.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaNotificationTemplate + listKind: GrafanaNotificationTemplateList + plural: grafananotificationtemplates + singular: grafananotificationtemplate + scope: Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaNotificationTemplate is the Schema for the GrafanaNotificationTemplate + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaNotificationTemplateSpec defines the desired state + of GrafanaNotificationTemplate + properties: + allowCrossNamespaceImport: + default: false + description: Allow the Operator to match this resource with Grafanas + outside the current namespace + type: boolean + editable: + description: Whether to enable or disable editing of the notification + template in Grafana UI + type: boolean + x-kubernetes-validations: + - message: spec.editable is immutable + rule: self == oldSelf + instanceSelector: + description: Selects Grafana instances for import + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: spec.instanceSelector is immutable + rule: self == oldSelf + name: + description: Template name + type: string + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + suspend: + description: Suspend pauses synchronizing attempts and tells the operator + to ignore changes + type: boolean + template: + description: Template content + type: string + required: + - instanceSelector + - name + type: object + x-kubernetes-validations: + - message: spec.editable is immutable + rule: ((!has(oldSelf.editable) && !has(self.editable)) || (has(oldSelf.editable) + && has(self.editable))) + - message: disabling spec.allowCrossNamespaceImport requires a recreate + to ensure desired state + rule: '!oldSelf.allowCrossNamespaceImport || (oldSelf.allowCrossNamespaceImport + && self.allowCrossNamespaceImport)' + status: + description: The most recent observed state of a Grafana resource + properties: + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanas.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanas.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: Grafana + listKind: GrafanaList + plural: grafanas + singular: grafana + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.version + name: Version + type: string + - jsonPath: .status.stage + name: Stage + type: string + - jsonPath: .status.stageStatus + name: Stage status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Grafana is the Schema for the grafanas API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaSpec defines the desired state of Grafana + properties: + client: + description: Client defines how the grafana-operator talks to the grafana instance. + properties: + headers: + additionalProperties: + type: string + description: Custom HTTP headers to use when interacting with this Grafana. + type: object + preferIngress: + description: If the operator should send it's request through the grafana instances ingress object instead of through the service. + nullable: true + type: boolean + timeout: + nullable: true + type: integer + tls: + description: TLS Configuration used to talk with the grafana instance. + properties: + certSecretRef: + description: Use a secret as a reference to give TLS Certificate information + properties: + name: + description: name is unique within a namespace to reference a secret resource. + type: string + namespace: + description: namespace defines the space within which the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + insecureSkipVerify: + description: Disable the CA check of the server + type: boolean + type: object + x-kubernetes-validations: + - message: insecureSkipVerify and certSecretRef cannot be set at the same time + rule: (has(self.insecureSkipVerify) && !(has(self.certSecretRef))) || (has(self.certSecretRef) && !(has(self.insecureSkipVerify))) + type: object + config: + additionalProperties: + additionalProperties: + type: string + type: object + description: Config defines how your grafana ini file should looks like. + type: object + x-kubernetes-preserve-unknown-fields: true + deployment: + description: Deployment sets how the deployment object should look like with your grafana instance, contains a number of defaults. 
+ properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + minReadySeconds: + format: int32 + type: integer + paused: + type: boolean + progressDeadlineSeconds: + format: int32 + type: integer + replicas: + format: int32 + type: integer + revisionHistoryLimit: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + strategy: + properties: + rollingUpdate: + properties: + maxSurge: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: 
string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + 
items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + 
x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + 
type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: 
+ - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + 
x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + 
grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + 
name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: 
object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: 
object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + 
httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + 
format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + 
required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostUsers: + type: boolean + hostname: + type: string + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: 
+ type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + 
items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + 
httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + 
initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object 
+ capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + 
format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string 
+ runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxChangePolicy: + type: string + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 
+ type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string 
+ optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: 
string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: 
+ format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + 
properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podCertificate: + properties: + certificateChainPath: + type: string + credentialBundlePath: + type: string + keyPath: + type: string + keyType: + type: string + maxExpirationSeconds: + format: int32 + type: integer + signerName: + type: string + required: + - keyType + - signerName + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 
+ type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: 
boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + type: object + type: object + type: object + disableDefaultAdminSecret: + description: DisableDefaultAdminSecret prevents operator from creating default admin-credentials secret + type: boolean + disableDefaultSecurityContext: + description: DisableDefaultSecurityContext prevents the operator from populating securityContext on deployments + enum: + - Pod + - Container + - All + type: string + external: + description: External enables you to configure external grafana instances that is not managed by the operator. + properties: + adminPassword: + description: AdminPassword key to talk to the external grafana instance. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + adminUser: + description: AdminUser key to talk to the external grafana instance. + properties: + key: + description: The key of the secret to select from. 
Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiKey: + description: The API key to talk to the external grafana instance, you need to define ether apiKey or adminUser/adminPassword. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + tls: + description: DEPRECATED, use top level `tls` instead. + properties: + certSecretRef: + description: Use a secret as a reference to give TLS Certificate information + properties: + name: + description: name is unique within a namespace to reference a secret resource. + type: string + namespace: + description: namespace defines the space within which the secret name must be unique. 
+ type: string + type: object + x-kubernetes-map-type: atomic + insecureSkipVerify: + description: Disable the CA check of the server + type: boolean + type: object + x-kubernetes-validations: + - message: insecureSkipVerify and certSecretRef cannot be set at the same time + rule: (has(self.insecureSkipVerify) && !(has(self.certSecretRef))) || (has(self.certSecretRef) && !(has(self.insecureSkipVerify))) + url: + description: URL of the external grafana instance you want to manage. + type: string + required: + - url + type: object + ingress: + description: Ingress sets how the ingress object should look like with your grafana instance. + properties: + metadata: + description: ObjectMeta contains only a [subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta). + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + description: IngressSpec describes the Ingress the user wishes to exist. + properties: + defaultBackend: + description: |- + defaultBackend is the backend that should handle requests that don't + match any rule. If Rules are not specified, DefaultBackend must be specified. + If DefaultBackend is not set, the handling of requests that do not match any + of the rules will be up to the Ingress controller. + properties: + resource: + description: |- + resource is an ObjectRef to another Kubernetes resource in the namespace + of the Ingress object. If resource is specified, a service.Name and + service.Port must not be specified. + This is a mutually exclusive setting with "Service". + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + service: + description: |- + service references a service as a backend. + This is a mutually exclusive setting with "Resource". + properties: + name: + description: |- + name is the referenced service. The service must exist in + the same namespace as the Ingress object. + type: string + port: + description: |- + port of the referenced service. A port name or port number + is required for a IngressServiceBackend. + properties: + name: + description: |- + name is the name of the port on the Service. + This is a mutually exclusive setting with "Number". + type: string + number: + description: |- + number is the numerical port number (e.g. 80) on the Service. + This is a mutually exclusive setting with "Name". + format: int32 + type: integer + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: object + ingressClassName: + description: |- + ingressClassName is the name of an IngressClass cluster resource. Ingress + controller implementations use this field to know whether they should be + serving this Ingress resource, by a transitive connection + (controller -> IngressClass -> Ingress resource). Although the + `kubernetes.io/ingress.class` annotation (simple constant name) was never + formally defined, it was widely supported by Ingress controllers to create + a direct binding between Ingress controller and Ingress resources. Newly + created Ingress resources should prefer using the field. However, even + though the annotation is officially deprecated, for backwards compatibility + reasons, ingress controllers should still honor that annotation if present. + type: string + rules: + description: |- + rules is a list of host rules used to configure the Ingress. 
If unspecified, + or no rule matches, all traffic is sent to the default backend. + items: + description: |- + IngressRule represents the rules mapping the paths under a specified host to + the related backend services. Incoming requests are first evaluated for a host + match, then routed to the backend associated with the matching IngressRuleValue. + properties: + host: + description: "host is the fully qualified domain name of a network host, as defined by RFC 3986.\nNote the following deviations from the \"host\" part of the\nURI as defined in RFC 3986:\n1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future.\nIncoming requests are matched against the host before the\nIngressRuleValue. If the host is unspecified, the Ingress routes all\ntraffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of\na network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name\nprefixed with a single wildcard label (e.g. \"*.foo.com\").\nThe wildcard character '*' must appear by itself as the first DNS label and\nmatches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\").\nRequests will be matched against the Host field in the following way:\n1. If host is precise, the request matches this rule if the http host header is equal to Host.\n2. If host is a wildcard, then the request matches this rule if the http host header\nis to equal to the suffix (removing the first label) of the wildcard rule." + type: string + http: + description: |- + HTTPIngressRuleValue is a list of http selectors pointing to backends. + In the example: http:///? 
-> backend where + where parts of the url correspond to RFC 3986, this resource will be used + to match against everything after the last '/' and before the first '?' + or '#'. + properties: + paths: + description: paths is a collection of paths that map requests to backends. + items: + description: |- + HTTPIngressPath associates a path with a backend. Incoming urls matching the + path are forwarded to the backend. + properties: + backend: + description: |- + backend defines the referenced service endpoint to which the traffic + will be forwarded to. + properties: + resource: + description: |- + resource is an ObjectRef to another Kubernetes resource in the namespace + of the Ingress object. If resource is specified, a service.Name and + service.Port must not be specified. + This is a mutually exclusive setting with "Service". + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + service: + description: |- + service references a service as a backend. + This is a mutually exclusive setting with "Resource". + properties: + name: + description: |- + name is the referenced service. The service must exist in + the same namespace as the Ingress object. + type: string + port: + description: |- + port of the referenced service. A port name or port number + is required for a IngressServiceBackend. + properties: + name: + description: |- + name is the name of the port on the Service. + This is a mutually exclusive setting with "Number". + type: string + number: + description: |- + number is the numerical port number (e.g. 
80) on the Service. + This is a mutually exclusive setting with "Name". + format: int32 + type: integer + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: object + path: + description: |- + path is matched against the path of an incoming request. Currently it can + contain characters disallowed from the conventional "path" part of a URL + as defined by RFC 3986. Paths must begin with a '/' and must be present + when using PathType with value "Exact" or "Prefix". + type: string + pathType: + description: |- + pathType determines the interpretation of the path matching. PathType can + be one of the following values: + * Exact: Matches the URL path exactly. + * Prefix: Matches based on a URL path prefix split by '/'. Matching is + done on a path element by element basis. A path element refers is the + list of labels in the path split by the '/' separator. A request is a + match for path p if every p is an element-wise prefix of p of the + request path. Note that if the last element of the path is a substring + of the last element in request path, it is not a match (e.g. /foo/bar + matches /foo/bar/baz, but does not match /foo/barbaz). + * ImplementationSpecific: Interpretation of the Path matching is up to + the IngressClass. Implementations can treat this as a separate PathType + or treat it identically to Prefix or Exact path types. + Implementations are required to support all path types. + type: string + required: + - backend + - pathType + type: object + type: array + x-kubernetes-list-type: atomic + required: + - paths + type: object + type: object + type: array + x-kubernetes-list-type: atomic + tls: + description: |- + tls represents the TLS configuration. Currently the Ingress only supports a + single TLS port, 443. 
If multiple members of this list specify different hosts, + they will be multiplexed on the same port according to the hostname specified + through the SNI TLS extension, if the ingress controller fulfilling the + ingress supports SNI. + items: + description: IngressTLS describes the transport layer security associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + jsonnet: + properties: + libraryLabelSelector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + persistentVolumeClaim: + description: PersistentVolumeClaim creates a PVC if you need to attach one to your grafana instance. + properties: + metadata: + description: ObjectMeta contains only a [subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta). + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + resources: + description: ResourceRequirements describes the compute resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeMode: + description: PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + type: object + preferences: + description: Preferences holds the Grafana Preferences settings + properties: + homeDashboardUid: + type: string + type: object + route: + description: Route sets how the ingress object should look like with your grafana instance, this only works in Openshift. + properties: + metadata: + description: ObjectMeta contains only a [subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta). 
+ properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + alternateBackends: + items: + description: |- + RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' + kind is allowed. Use 'weight' field to emphasize one over others. + properties: + kind: + default: Service + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + enum: + - Service + - "" + type: string + name: + description: name of the service/target that is being referred to. e.g. name of the service + minLength: 1 + type: string + weight: + default: 100 + description: |- + weight as an integer between 0 and 256, default 100, that specifies the target's relative weight + against other target reference objects. 0 suppresses requests to this backend. + format: int32 + maximum: 256 + minimum: 0 + type: integer + required: + - kind + - name + type: object + type: array + host: + type: string + path: + type: string + port: + description: RoutePort defines a port mapping from a router to an endpoint in the service endpoints. + properties: + targetPort: + anyOf: + - type: integer + - type: string + description: |- + The target port on pods selected by the service this route points to. + If this is a string, it will be looked up as a named port in the target + endpoints port list. Required + x-kubernetes-int-or-string: true + required: + - targetPort + type: object + subdomain: + type: string + tls: + description: TLSConfig defines config used to secure a route and provide termination + properties: + caCertificate: + description: caCertificate provides the cert authority certificate contents + type: string + certificate: + description: |- + certificate provides certificate contents. This should be a single serving certificate, not a certificate + chain. Do not include a CA certificate. 
+ type: string + destinationCACertificate: + description: |- + destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt + termination this file should be provided in order to have routers use it for health checks on the secure connection. + If this field is not specified, the router may provide its own destination CA and perform hostname validation using + the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + verify. + type: string + externalCertificate: + description: |- + externalCertificate provides certificate contents as a secret reference. + This should be a single serving certificate, not a certificate + chain. Do not include a CA certificate. The secret referenced should + be present in the same namespace as that of the Route. + Forbidden when `certificate` is set. + The router service account needs to be granted with read-only access to this secret, + please refer to openshift docs for additional details. + properties: + name: + description: |- + name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + insecureEdgeTerminationPolicy: + description: |- + insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While + each router may make its own decisions on which ports to expose, this is normally port 80. + + If a route does not specify insecureEdgeTerminationPolicy, then the default behavior is "None". + + * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only). + + * None - no traffic is allowed on the insecure port (default). + + * Redirect - clients are redirected to the secure port. 
+ enum: + - Allow + - None + - Redirect + - "" + type: string + key: + description: key provides key file contents + type: string + termination: + description: |- + termination indicates termination type. + + * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + * reencrypt - TLS termination is done by the router and https is used to communicate with the backend + + Note: passthrough termination is incompatible with httpHeader actions + enum: + - edge + - reencrypt + - passthrough + type: string + required: + - termination + type: object + x-kubernetes-validations: + - message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + to: + description: |- + RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' + kind is allowed. Use 'weight' field to emphasize one over others. + properties: + kind: + default: Service + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + enum: + - Service + - "" + type: string + name: + description: name of the service/target that is being referred to. e.g. name of the service + minLength: 1 + type: string + weight: + default: 100 + description: |- + weight as an integer between 0 and 256, default 100, that specifies the target's relative weight + against other target reference objects. 0 suppresses requests to this backend. + format: int32 + maximum: 256 + minimum: 0 + type: integer + required: + - kind + - name + type: object + wildcardPolicy: + description: WildcardPolicyType indicates the type of wildcard support needed by routes. 
+ type: string + type: object + type: object + service: + description: Service sets how the service object should look like with your grafana instance, contains a number of defaults. + properties: + metadata: + description: ObjectMeta contains only a [subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta). + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + description: ServiceSpec describes the attributes that a user creates on a service. + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. 
Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. 
Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). 
Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. 
This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. 
If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's port. + properties: + appProtocol: + description: |- + The application protocol for this port. 
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. 
+ format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. 
Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. 
If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount sets how the ServiceAccount object should look like with your grafana instance, contains a number of defaults. + properties: + automountServiceAccountToken: + type: boolean + imagePullSecrets: + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + metadata: + description: ObjectMeta contains only a [subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta). 
+ properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + secrets: + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + type: object + suspend: + description: Suspend pauses reconciliation of owned resources like deployments, Services, Etc. upon changes + type: boolean + version: + description: Version specifies the version of Grafana to use for this deployment. It follows the same format as the docker.io/grafana/grafana tags + type: string + type: object + status: + description: GrafanaStatus defines the observed state of Grafana + properties: + adminUrl: + type: string + alertRuleGroups: + items: + type: string + type: array + conditions: + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. 
+ The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + contactPoints: + items: + type: string + type: array + dashboards: + items: + type: string + type: array + datasources: + items: + type: string + type: array + folders: + items: + type: string + type: array + lastMessage: + type: string + libraryPanels: + items: + type: string + type: array + muteTimings: + items: + type: string + type: array + notificationTemplates: + items: + type: string + type: array + serviceaccounts: + items: + type: string + type: array + stage: + type: string + stageStatus: + type: string + version: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/charts/crds/crds/grafana.integreatly.org_grafanaserviceaccounts.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: grafanaserviceaccounts.grafana.integreatly.org +spec: + group: grafana.integreatly.org + names: + categories: + - grafana-operator + kind: GrafanaServiceAccount + listKind: GrafanaServiceAccountList + plural: grafanaserviceaccounts + singular: grafanaserviceaccount + scope: Namespaced + versions: + - additionalPrinterColumns: + - format: date-time + jsonPath: .status.lastResync + name: Last 
resync + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: GrafanaServiceAccount is the Schema for the grafanaserviceaccounts + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GrafanaServiceAccountSpec defines the desired state of a + GrafanaServiceAccount. 
+ properties: + instanceName: + description: Name of the Grafana instance to create the service account + for + minLength: 1 + type: string + x-kubernetes-validations: + - message: spec.instanceName is immutable + rule: self == oldSelf + isDisabled: + default: false + description: Whether the service account is disabled + type: boolean + name: + description: Name of the service account in Grafana + minLength: 1 + type: string + x-kubernetes-validations: + - message: spec.name is immutable + rule: self == oldSelf + resyncPeriod: + default: 10m0s + description: How often the resource is synced, defaults to 10m0s if + not set + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + x-kubernetes-validations: + - message: spec.resyncPeriod must be greater than 0 + rule: duration(self) > duration('0s') + role: + description: Role of the service account (Viewer, Editor, Admin) + enum: + - Viewer + - Editor + - Admin + type: string + suspend: + default: false + description: Suspend pauses reconciliation of the service account + type: boolean + tokens: + description: Tokens to create for the service account + items: + description: GrafanaServiceAccountTokenSpec defines a token for + a service account + properties: + expires: + description: Expiration date of the token. If not set, the token + never expires + format: date-time + type: string + name: + description: Name of the token + minLength: 1 + type: string + secretName: + description: Name of the secret to store the token. 
If not set, + a name will be generated + minLength: 1 + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - instanceName + - name + - role + type: object + status: + description: GrafanaServiceAccountStatus defines the observed state of + a GrafanaServiceAccount + properties: + account: + description: Info contains the Grafana service account information + properties: + id: + description: ID of the service account in Grafana + format: int64 + type: integer + isDisabled: + description: IsDisabled indicates if the service account is disabled + type: boolean + login: + type: string + name: + type: string + role: + description: Role is the Grafana role for the service account + (Viewer, Editor, Admin) + type: string + tokens: + description: Information about tokens + items: + description: GrafanaServiceAccountTokenStatus describes a token + created in Grafana. + properties: + expires: + description: |- + Expiration time of the token + N.B. There's possible discrepancy with the expiration time in spec + It happens because Grafana API accepts TTL in seconds then calculates the expiration time against the current time + format: date-time + type: string + id: + description: ID of the token in Grafana + format: int64 + type: integer + name: + type: string + secret: + description: Name of the secret containing the token + properties: + name: + type: string + namespace: + type: string + type: object + required: + - id + - name + type: object + type: array + required: + - id + - isDisabled + - login + - name + - role + type: object + conditions: + description: Results when synchonizing resource with Grafana instances + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastResync: + description: Last time the resource was synchronized with Grafana + instances + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + +--- +# Source: grafana-operator/charts/grafana-operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-operator + namespace: grafana-operator + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator +automountServiceAccountToken: true +--- +# Source: grafana-operator/charts/grafana-operator/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: grafana-operator + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator +rules: + - apiGroups: + - "" + resources: + - configmaps + - persistentvolumeclaims + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - watch + - apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases 
+ verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - grafana.integreatly.org + resources: + - grafanaalertrulegroups + - grafanacontactpoints + - grafanadashboards + - grafanadatasources + - grafanafolders + - grafanalibrarypanels + - grafanamutetimings + - grafananotificationpolicies + - grafananotificationpolicyroutes + - grafananotificationtemplates + - grafanas + - grafanaserviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - grafana.integreatly.org + resources: + - grafanaalertrulegroups/finalizers + - grafanacontactpoints/finalizers + - grafanadashboards/finalizers + - grafanadatasources/finalizers + - grafanafolders/finalizers + - grafanalibrarypanels/finalizers + - grafanamutetimings/finalizers + - grafananotificationpolicies/finalizers + - grafananotificationpolicyroutes/finalizers + - grafananotificationtemplates/finalizers + - grafanas/finalizers + - grafanaserviceaccounts/finalizers + verbs: + - update + - apiGroups: + - grafana.integreatly.org + resources: + - grafanaalertrulegroups/status + - grafanacontactpoints/status + - grafanadashboards/status + - grafanadatasources/status + - grafanafolders/status + - grafanalibrarypanels/status + - grafanamutetimings/status + - grafananotificationpolicies/status + - grafananotificationpolicyroutes/status + - grafananotificationtemplates/status + - grafanas/status + - grafanaserviceaccounts/status + verbs: + - get + - patch + - update + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: grafana-operator/charts/grafana-operator/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grafana-operator + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + 
app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator +subjects: + - kind: ServiceAccount + name: grafana-operator + namespace: grafana-operator +roleRef: + kind: ClusterRole + name: grafana-operator + apiGroup: rbac.authorization.k8s.io +--- +# Source: grafana-operator/charts/grafana-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: grafana-operator-metrics-service + namespace: grafana-operator + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator +spec: + type: ClusterIP + ports: + - port: 9090 + targetPort: metrics + protocol: TCP + name: metrics + - port: 8888 + targetPort: pprof + protocol: TCP + name: pprof + selector: + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator +--- +# Source: grafana-operator/charts/grafana-operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana-operator + namespace: grafana-operator + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + template: + metadata: + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator + spec: + serviceAccountName: grafana-operator + containers: + - name: grafana-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + image: "ghcr.io/grafana/grafana-operator:v5.20.0" + imagePullPolicy: IfNotPresent + env: + - name: WATCH_NAMESPACE + value: "" + - name: WATCH_NAMESPACE_SELECTOR + value: "" + - name: WATCH_LABEL_SELECTORS + value: "" + - name: ENFORCE_CACHE_LABELS + value: "safe" + + - name: CLUSTER_DOMAIN + value: "" + args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=0.0.0.0:9090 + - --pprof-addr=0.0.0.0:8888 + - --zap-encoder=console + - --zap-log-level=info + - --zap-time-encoding=rfc3339 + - --leader-elect + - --max-concurrent-reconciles=1 + volumeMounts: + - name: dashboards-dir + mountPath: /tmp/dashboards + ports: + - containerPort: 9090 + name: metrics + protocol: TCP + - containerPort: 8888 + name: pprof + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8081 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + resources: + requests: + cpu: 10m + memory: 64Mi + volumes: + - name: dashboards-dir + emptyDir: {} +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: grafana-operator-postgresql-17-cluster + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie" + imagePullPolicy: IfNotPresent + postgresUID: 26 + postgresGID: 26 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true
+ isWALArchiver: false + parameters: + barmanObjectName: "grafana-operator-postgresql-17-external-backup" + serverName: "grafana-operator-postgresql-17-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "grafana-operator-postgresql-17-garage-local-backup" + serverName: "grafana-operator-postgresql-17-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "grafana-operator-postgresql-17-recovery" + serverName: grafana-operator-postgresql-17-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: false + enablePDB: true + + postgresql: + parameters: + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + bootstrap: + recovery: + + database: app + + source: grafana-operator-postgresql-17-backup-1 + + externalClusters: + - name: grafana-operator-postgresql-17-backup-1 + plugin: + name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "grafana-operator-postgresql-17-recovery" + serverName: grafana-operator-postgresql-17-backup-1 +--- +# Source: grafana-operator/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: grafana-auth-secret + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-auth-secret + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + secretStoreRef: + kind: ClusterSecretStore + name: 
vault + data: + - secretKey: admin-user + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/grafana/auth + metadataPolicy: None + property: admin-user + - secretKey: admin-password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/grafana/auth + metadataPolicy: None + property: admin-password +--- +# Source: grafana-operator/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: grafana-oauth-secret + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-oauth-secret + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: AUTH_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/grafana + metadataPolicy: None + property: client + - secretKey: AUTH_CLIENT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/grafana + metadataPolicy: None + property: secret +--- +# Source: grafana-operator/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: grafana-operator-postgresql-17-cluster-backup-secret + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-operator-postgresql-17-cluster-backup-secret + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: 
grafana-operator/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: grafana-operator-postgresql-17-cluster-backup-secret-garage + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-operator-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: grafana-operator/templates/grafana.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: Grafana +metadata: + name: grafana-main + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-main + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app: grafana-main +spec: + config: + analytics: + enabled: "false" + check_for_updates: "false" + reporting_enabled: "false" + server: + domain: alexlebens.net + root_url: https://grafana.alexlebens.net + log: + mode: "console" + users: + auto_assign_org: "true" + auto_assign_org_id: "1" + auth: + disable_login_form: "true" + auto_login: "true" + signout_redirect_url: https://authentik.alexlebens.net/application/o/grafana/end-session/ + auth.generic_oauth: + enabled: "true" + name: Authentik + allow_sign_up: "true" + client_id: ${AUTH_CLIENT_ID} + client_secret: ${AUTH_CLIENT_SECRET} + scopes: openid profile email + 
auth_url: https://authentik.alexlebens.net/application/o/authorize/ + token_url: https://authentik.alexlebens.net/application/o/token/ + api_url: https://authentik.alexlebens.net/application/o/userinfo/ + role_attribute_path: contains(groups, 'Grafana Admins') && 'Admin' || contains(groups, 'Grafana Editors') && 'Editor' || 'Viewer' + database: + type: postgres + host: "${DB_HOST}:${DB_PORT}" + name: ${DB_DATABASE} + user: ${DB_USER} + password: ${DB_PASSWORD} + remote_cache: + type: redis + connstr: addr=redis-replication-remote-cache-master.grafana-operator:6379,pool_size=100,db=0,ssl=false + unified_alerting: + enabled: "true" + ha_redis_address: redis-replication-unified-alerting-master.grafana-operator:6379 + deployment: + spec: + replicas: 3 + template: + spec: + containers: + - name: grafana + image: grafana/grafana:12.0.0 + resources: + requests: + cpu: 100m + memory: 128Mi + env: + - name: AUTH_CLIENT_ID + valueFrom: + secretKeyRef: + name: grafana-oauth-secret + key: AUTH_CLIENT_ID + - name: AUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: grafana-oauth-secret + key: AUTH_CLIENT_SECRET + - name: ADMIN_USER + valueFrom: + secretKeyRef: + name: grafana-auth-secret + key: admin-user + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: grafana-auth-secret + key: admin-password + - name: DB_HOST + valueFrom: + secretKeyRef: + name: grafana-operator-postgresql-17-cluster-app + key: host + - name: DB_DATABASE + valueFrom: + secretKeyRef: + name: grafana-operator-postgresql-17-cluster-app + key: dbname + - name: DB_PORT + valueFrom: + secretKeyRef: + name: grafana-operator-postgresql-17-cluster-app + key: port + - name: DB_USER + valueFrom: + secretKeyRef: + name: grafana-operator-postgresql-17-cluster-app + key: user + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: grafana-operator-postgresql-17-cluster-app + key: password +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 
+kind: GrafanaDashboard +metadata: + name: grafana-dashboard-ceph + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-ceph + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/ceph.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-coredns + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-coredns + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/coredns.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-etcd + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-etcd + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/etcd.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-garage + namespace: grafana-operator + labels: + app.kubernetes.io/name: 
grafana-dashboard-garage + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/garage.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-loki + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-loki + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/loki.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-node-full + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-node-full + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/node-full.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-node-short + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-node-short + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + 
instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-system + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/system/node-short.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-argocd + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-argocd + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/argocd.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-blocky + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-blocky + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/blocky.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-cert-manager + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-cert-manager + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + 
resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/cert-manager.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-cloudnative-pg + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-cloudnative-pg + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/cloudnative-pg.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-descheduler + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-descheduler + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/descheduler.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-gatus + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-gatus + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: 
http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/gatus.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-operator + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/grafana-operator.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-harbor + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-harbor + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/harbor.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-redis-replication + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-redis-replication + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: 
http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/redis-replication.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-redis-operator + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-redis-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/redis-operator.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-speedtest-exporter + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-speedtest-exporter + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/speedtest-exporter.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-spegel + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-spegel + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: 
http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/spegel.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-traefik + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-traefik + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/traefik.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-trivy + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-trivy + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/trivy.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-unpoller + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-unpoller + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/unpoller.json +--- +# Source: 
grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-volsync + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-volsync + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-service + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/service/volsync.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-s3 + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-s3 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-platform + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/platform/s3.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-authentik + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-authentik + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-platform + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/platform/authentik.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + 
name: grafana-dashboard-gitea + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-gitea + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-platform + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/platform/gitea.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-ntfy + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-ntfy + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-platform + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/platform/ntfy.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-qbittorrent + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-qbittorrent + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-platform + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/platform/qbittorrent.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-vault + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-vault + 
app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-platform + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/platform/vault.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-airgradient + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-airgradient + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-iot + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/iot/airgradient.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-server-power-consumption + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-server-power-consumption + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-iot + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/iot/server-power-consumption.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-immich + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-immich + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: 
grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-application + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/application/immich.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-radarr + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-radarr + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-application + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/application/radarr.json +--- +# Source: grafana-operator/templates/grafana-dashboard.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDashboard +metadata: + name: grafana-dashboard-sonarr + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-dashboard-sonarr + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + contentCacheDuration: 1h + folderUID: grafana-folder-application + resyncPeriod: 1h + url: http://gitea-http.gitea:3000/alexlebens/grafana-dashboards/raw/branch/main/dashboards/application/sonarr.json +--- +# Source: grafana-operator/templates/grafana-datasource.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDatasource +metadata: + name: grafana-datasource-prometheus + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-datasource-prometheus + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + datasource: + name: Prometheus + type: prometheus + url: 
http://kube-prometheus-stack-prometheus.kube-prometheus-stack:9090/ + access: proxy + isDefault: true + jsonData: + timeInterval: 30s + instanceSelector: + matchLabels: + app: grafana-main + plugins: + - name: camptocamp-prometheus-alertmanager-datasource + version: 2.1.0 + resyncPeriod: 30s + uid: kube-prometheus-stack +--- +# Source: grafana-operator/templates/grafana-datasource.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaDatasource +metadata: + name: grafana-datasource-loki + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-datasource-loki + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + datasource: + name: Loki + type: loki + url: http://loki.loki:3100/ + access: proxy + instanceSelector: + matchLabels: + app: grafana-main + resyncPeriod: 30s + uid: loki +--- +# Source: grafana-operator/templates/grafana-folder.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaFolder +metadata: + name: grafana-folder-application + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-folder-application + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + title: Application + uid: grafana-folder-application + resyncPeriod: 30s + permissions: | + { + "items": [ + { + "role": "Admin", + "permission": 4 + }, + { + "role": "Editor", + "permission": 2 + }, + { + "role": "Viewer", + "permission": 1 + } + ] + } +--- +# Source: grafana-operator/templates/grafana-folder.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaFolder +metadata: + name: grafana-folder-iot + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-folder-iot + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + title: IoT + uid: grafana-folder-iot + 
resyncPeriod: 30s + permissions: | + { + "items": [ + { + "role": "Admin", + "permission": 4 + }, + { + "role": "Editor", + "permission": 2 + }, + { + "role": "Viewer", + "permission": 1 + } + ] + } +--- +# Source: grafana-operator/templates/grafana-folder.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaFolder +metadata: + name: grafana-folder-platform + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-folder-platform + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + title: Platform + uid: grafana-folder-platform + resyncPeriod: 30s + permissions: | + { + "items": [ + { + "role": "Admin", + "permission": 4 + }, + { + "role": "Editor", + "permission": 2 + }, + { + "role": "Viewer", + "permission": 1 + } + ] + } +--- +# Source: grafana-operator/templates/grafana-folder.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaFolder +metadata: + name: grafana-folder-service + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-folder-service + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + title: Service + uid: grafana-folder-service + resyncPeriod: 30s + permissions: | + { + "items": [ + { + "role": "Admin", + "permission": 4 + }, + { + "role": "Editor", + "permission": 2 + }, + { + "role": "Viewer", + "permission": 1 + } + ] + } +--- +# Source: grafana-operator/templates/grafana-folder.yaml +apiVersion: grafana.integreatly.org/v1beta1 +kind: GrafanaFolder +metadata: + name: grafana-folder-system + namespace: grafana-operator + labels: + app.kubernetes.io/name: grafana-folder-system + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + instanceSelector: + matchLabels: + app: grafana-main + title: System + uid: 
grafana-folder-system + resyncPeriod: 30s + permissions: | + { + "items": [ + { + "role": "Admin", + "permission": 4 + }, + { + "role": "Editor", + "permission": 2 + }, + { + "role": "Viewer", + "permission": 1 + } + ] + } +--- +# Source: grafana-operator/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-grafana + namespace: grafana-operator + labels: + app.kubernetes.io/name: http-route-grafana + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - grafana.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: grafana-main-service + port: 3000 + weight: 100 +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "grafana-operator-postgresql-17-external-backup" + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/grafana-operator/grafana-operator-postgresql-17-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: grafana-operator-postgresql-17-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: grafana-operator-postgresql-17-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: 
+ name: "grafana-operator-postgresql-17-garage-local-backup" + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/grafana-operator/grafana-operator-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name: grafana-operator-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: grafana-operator-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: grafana-operator-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "grafana-operator-postgresql-17-recovery" + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/grafana-operator/grafana-operator-postgresql-17-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: grafana-operator-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: grafana-operator-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/prometheus-rule.yaml 
+apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: grafana-operator-postgresql-17-alert-rules + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/grafana-operator-postgresql-17 + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster has a backend waiting for longer than 5 minutes. + description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at a severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the main. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less + instances. 
The replaced instance may need some time to catch-up with the cluster primary instance. + + This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="grafana-operator"} - cnpg_pg_replication_is_wal_receiver_up{namespace="grafana-operator"}) < 1 + for: 5m + labels: + severity: critical + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster less than 2 standby replicas. + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch-up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="grafana-operator"} - cnpg_pg_replication_is_wal_receiver_up{namespace="grafana-operator"}) < 2 + for: 5m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! 
+ description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="grafana-operator", pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="grafana-operator", pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. + description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="grafana-operator", pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="grafana-operator", pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. 
+ + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="grafana-operator",pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="grafana-operator", pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! 
+ description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs! + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="grafana-operator", persistentvolumeclaim=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + expr: | + (count(cnpg_collector_up{namespace="grafana-operator",pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0 + for: 5m + labels: + severity: critical + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterPGDatabaseXidAgeWarning + annotations: + summary: CNPG Cluster has a number of transactions from the frozen XID to the current one. + description: |- + Over 300,000,000 transactions from frozen xid + on pod {{ $labels.pod }} + expr: | + cnpg_pg_database_xid_age > 300000000 + for: 1m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterPGReplicationWarning + annotations: + summary: CNPG Cluster standby is lagging behind the primary. + description: |- + Standby is lagging behind by over 300 seconds (5 minutes) + expr: | + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterReplicaFailingReplicationWarning + annotations: + summary: CNPG Cluster has a replica is failing to replicate. + description: |- + Replica {{ $labels.pod }} + is failing to replicate + expr: | + cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up + for: 1m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster + - alert: CNPGClusterZoneSpreadWarning + annotations: + summary: CNPG Cluster instances in the same zone. + description: |- + CloudNativePG Cluster "grafana-operator/grafana-operator-postgresql-17-cluster" has instances in the same availability zone. + + A disaster in one availability zone will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="grafana-operator", pod=~"grafana-operator-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: grafana-operator + cnpg_cluster: grafana-operator-postgresql-17-cluster +--- +# Source: grafana-operator/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-unified-alerting + namespace: grafana-operator + labels: + app.kubernetes.io/name: redis-replication-unified-alerting + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: grafana-operator/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-remote-cache + namespace: grafana-operator + labels: + app.kubernetes.io/name: redis-replication-remote-cache + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + 
storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "grafana-operator-postgresql-17-daily-backup-scheduled-backup" + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: grafana-operator-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "grafana-operator-postgresql-17-external-backup" +--- +# Source: grafana-operator/charts/postgres-17-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "grafana-operator-postgresql-17-live-backup-scheduled-backup" + namespace: grafana-operator + labels: + helm.sh/chart: postgres-17-cluster-6.16.0 + app.kubernetes.io/name: grafana-operator-postgresql-17 + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: grafana-operator-postgresql-17-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "grafana-operator-postgresql-17-garage-local-backup" +--- +# Source: grafana-operator/charts/grafana-operator/templates/servicemonitor.yaml 
+apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: grafana-operator + namespace: grafana-operator + labels: + helm.sh/chart: grafana-operator-v5.20.0 + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/version: "v5.20.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: grafana-operator + app.kubernetes.io/component: operator +spec: + jobLabel: grafana-operator + namespaceSelector: + matchNames: + - grafana-operator + selector: + matchLabels: + app.kubernetes.io/name: grafana-operator + app.kubernetes.io/instance: grafana-operator + endpoints: + - port: metrics + path: /metrics + interval: 1m + scrapeTimeout: 10s +--- +# Source: grafana-operator/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-grafana-operator + namespace: grafana-operator + labels: + app.kubernetes.io/name: redis-replication-grafana-operator + app.kubernetes.io/instance: grafana-operator + app.kubernetes.io/part-of: grafana-operator + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s diff --git a/clusters/cl01tl/manifests/headlamp/headlamp.yaml b/clusters/cl01tl/manifests/headlamp/headlamp.yaml new file mode 100644 index 000000000..aa276631d --- /dev/null +++ b/clusters/cl01tl/manifests/headlamp/headlamp.yaml @@ -0,0 +1,308 @@ +--- +# Source: headlamp/charts/headlamp/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: headlamp + namespace: headlamp + labels: + helm.sh/chart: headlamp-0.38.0 + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + app.kubernetes.io/version: "0.38.0" + app.kubernetes.io/managed-by: Helm +--- +# Source: headlamp/templates/service-account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 
headlamp-admin + namespace: headlamp + labels: + app.kubernetes.io/name: headlamp-admin + app.kubernetes.io/instance: headlamp + app.kubernetes.io/part-of: headlamp +--- +# Source: headlamp/charts/headlamp/templates/plugin-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: headlamp-plugin-config + namespace: headlamp + labels: + helm.sh/chart: headlamp-0.38.0 + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + app.kubernetes.io/version: "0.38.0" + app.kubernetes.io/managed-by: Helm +data: + plugin.yml: | + plugins: + - name: cert-manager + source: https://artifacthub.io/packages/headlamp/headlamp-plugins/headlamp_cert-manager + version: 0.1.0 + - name: trivy + source: https://artifacthub.io/packages/headlamp/headlamp-trivy/headlamp_trivy + version: 0.3.1 + installOptions: + parallel: true + maxConcurrent: 2 +--- +# Source: headlamp/charts/headlamp/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: headlamp-admin + labels: + helm.sh/chart: headlamp-0.38.0 + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + app.kubernetes.io/version: "0.38.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: headlamp + namespace: headlamp +--- +# Source: headlamp/templates/cluster-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-admin-oidc + namespace: headlamp + labels: + app.kubernetes.io/name: cluster-admin-oidc + app.kubernetes.io/instance: headlamp + app.kubernetes.io/part-of: headlamp +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: User + name: alexanderlebens@gmail.com + apiGroup: rbac.authorization.k8s.io + - kind: ServiceAccount + name: headlamp-admin + namespace: headlamp +--- +# Source: 
headlamp/charts/headlamp/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: headlamp + namespace: headlamp + labels: + helm.sh/chart: headlamp-0.38.0 + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + app.kubernetes.io/version: "0.38.0" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp +--- +# Source: headlamp/charts/headlamp/templates/deployment.yaml +# This block of code is used to extract the values from the env. +# This is done to check if the values are non-empty and if they are, they are used in the deployment.yaml. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: headlamp + namespace: headlamp + labels: + helm.sh/chart: headlamp-0.38.0 + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + app.kubernetes.io/version: "0.38.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + template: + metadata: + labels: + app.kubernetes.io/name: headlamp + app.kubernetes.io/instance: headlamp + spec: + serviceAccountName: headlamp + automountServiceAccountToken: true + securityContext: + {} + containers: + - name: headlamp + securityContext: + privileged: false + runAsGroup: 101 + runAsNonRoot: true + runAsUser: 100 + image: "ghcr.io/headlamp-k8s/headlamp:v0.38.0" + imagePullPolicy: IfNotPresent + + # Check if externalSecret is enabled + envFrom: + - secretRef: + name: headlamp-oidc-secret + args: + - "-in-cluster" + - "-watch-plugins-changes" + - "-plugins-dir=/headlamp/plugins" + - "-oidc-client-id=$(OIDC_CLIENT_ID)" + - "-oidc-client-secret=$(OIDC_CLIENT_SECRET)" + - "-oidc-idp-issuer-url=$(OIDC_ISSUER_URL)" + - "-oidc-scopes=$(OIDC_SCOPES)" + ports: + - name: http + containerPort: 4466 + protocol: TCP + 
livenessProbe: + httpGet: + path: "/" + port: http + readinessProbe: + httpGet: + path: "/" + port: http + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - name: plugins-dir + mountPath: /headlamp/plugins + - name: headlamp-plugin + image: node:lts-alpine + command: ["/bin/sh", "-c"] + args: + - | + if [ -f "/config/plugin.yml" ]; then + echo "Installing plugins from config..." + cat /config/plugin.yml + # Use a writable cache directory + export NPM_CONFIG_CACHE=/tmp/npm-cache + # Use a writable config directory + export NPM_CONFIG_USERCONFIG=/tmp/npm-userconfig + mkdir -p /tmp/npm-cache /tmp/npm-userconfig + npx --yes @headlamp-k8s/pluginctl@latest install --config /config/plugin.yml --folderName /headlamp/plugins --watch + fi + volumeMounts: + - name: plugins-dir + mountPath: /headlamp/plugins + - name: plugin-config + mountPath: /config + resources: + null + securityContext: + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + volumes: + - name: plugins-dir + emptyDir: {} + - name: plugin-config + configMap: + name: headlamp-plugin-config +--- +# Source: headlamp/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: headlamp-oidc-secret + namespace: headlamp + labels: + app.kubernetes.io/name: headlamp-oidc-secret + app.kubernetes.io/instance: headlamp + app.kubernetes.io/part-of: headlamp +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: OIDC_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/headlamp + metadataPolicy: None + property: client + - secretKey: OIDC_CLIENT_SECRET + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/headlamp + metadataPolicy: None + property: secret + - secretKey: OIDC_ISSUER_URL + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/headlamp + metadataPolicy: None + property: 
issuer + - secretKey: OIDC_SCOPES + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/headlamp + metadataPolicy: None + property: scopes + - secretKey: OIDC_VALIDATOR_ISSUER_URL + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/headlamp + metadataPolicy: None + property: validator-issuer-url + - secretKey: OIDC_VALIDATOR_CLIENT_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/headlamp + metadataPolicy: None + property: validator-client-id +--- +# Source: headlamp/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: https-route-headlamp + namespace: headlamp + labels: + app.kubernetes.io/name: https-route-headlamp + app.kubernetes.io/instance: headlamp + app.kubernetes.io/part-of: headlamp +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - headlamp.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: headlamp + port: 80 + weight: 100 diff --git a/clusters/cl01tl/manifests/komodo/komodo.yaml b/clusters/cl01tl/manifests/komodo/komodo.yaml new file mode 100644 index 000000000..8200575bc --- /dev/null +++ b/clusters/cl01tl/manifests/komodo/komodo.yaml @@ -0,0 +1,945 @@ +--- +# Source: komodo/charts/komodo/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: komodo-cache + labels: + app.kubernetes.io/instance: komodo + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: komodo + helm.sh/chart: komodo-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: komodo +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: komodo/charts/komodo/templates/common.yaml +kind: PersistentVolumeClaim +apiVersion: v1 
+metadata: + name: komodo-syncs + labels: + app.kubernetes.io/instance: komodo + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: komodo + helm.sh/chart: komodo-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: komodo +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + storageClassName: "ceph-block" +--- +# Source: komodo/charts/komodo/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: komodo-ferretdb-2 + labels: + app.kubernetes.io/instance: komodo + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: komodo + app.kubernetes.io/service: komodo-ferretdb-2 + helm.sh/chart: komodo-4.4.0 + namespace: komodo +spec: + type: ClusterIP + ports: + - port: 27017 + targetPort: 27017 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: ferretdb-2 + app.kubernetes.io/instance: komodo + app.kubernetes.io/name: komodo +--- +# Source: komodo/charts/komodo/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: komodo-main + labels: + app.kubernetes.io/instance: komodo + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: komodo + app.kubernetes.io/service: komodo-main + helm.sh/chart: komodo-4.4.0 + namespace: komodo +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 9120 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: komodo + app.kubernetes.io/name: komodo +--- +# Source: komodo/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: komodo-periphery-ps10rp + namespace: komodo + labels: + app.kubernetes.io/name: komodo-periphery-ps10rp + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + annotations: + tailscale.com/tailnet-fqdn: komodo-periphery-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: komodo/charts/komodo/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: komodo-ferretdb-2 + labels: + app.kubernetes.io/controller: ferretdb-2 + app.kubernetes.io/instance: komodo + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: komodo + helm.sh/chart: komodo-4.4.0 + namespace: komodo +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: ferretdb-2 + app.kubernetes.io/name: komodo + app.kubernetes.io/instance: komodo + template: + metadata: + labels: + app.kubernetes.io/controller: ferretdb-2 + app.kubernetes.io/instance: komodo + app.kubernetes.io/name: komodo + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: FERRETDB_POSTGRESQL_URL + valueFrom: + secretKeyRef: + key: uri + name: komodo-postgresql-17-fdb-cluster-app + image: ghcr.io/ferretdb/ferretdb:2.7.0 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi +--- +# Source: komodo/charts/komodo/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: komodo-main + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: komodo + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: komodo + helm.sh/chart: komodo-4.4.0 + namespace: komodo +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: komodo + app.kubernetes.io/instance: komodo + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: komodo + app.kubernetes.io/name: komodo + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: COMPOSE_LOGGING_DRIVER + 
value: local + - name: KOMODO_HOST + value: https://komodo.alexlebens.net + - name: KOMODO_TITLE + value: Komodo + - name: PASSKEY + valueFrom: + secretKeyRef: + key: passkey + name: komodo-secret + - name: KOMODO_MONITORING_INTERVAL + value: 15-sec + - name: KOMODO_RESOURCE_POLL_INTERVAL + value: 5-min + - name: KOMODO_PASSKEY + valueFrom: + secretKeyRef: + key: passkey + name: komodo-secret + - name: KOMODO_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + key: webhook + name: komodo-secret + - name: KOMODO_JWT_SECRET + valueFrom: + secretKeyRef: + key: jwt + name: komodo-secret + - name: KOMODO_LOCAL_AUTH + value: "true" + - name: KOMODO_ENABLE_NEW_USERS + value: "true" + - name: KOMODO_DISABLE_NON_ADMIN_CREATE + value: "true" + - name: KOMODO_TRANSPARENT_MODE + value: "false" + - name: PERIPHERY_SSL_ENABLED + value: "false" + - name: DB_USERNAME + valueFrom: + secretKeyRef: + key: user + name: komodo-postgresql-17-fdb-cluster-app + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: komodo-postgresql-17-fdb-cluster-app + - name: KOMODO_DATABASE_URI + value: mongodb://$(DB_USERNAME):$(DB_PASSWORD)@komodo-ferretdb-2.komodo:27017/komodo + - name: KOMODO_OIDC_ENABLED + value: "true" + - name: KOMODO_OIDC_PROVIDER + value: http://authentik-server.authentik/application/o/komodo/ + - name: KOMODO_OIDC_REDIRECT_HOST + value: https://authentik.alexlebens.net + - name: KOMODO_OIDC_CLIENT_ID + valueFrom: + secretKeyRef: + key: oidc-client-id + name: komodo-secret + - name: KOMODO_OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: oidc-client-secret + name: komodo-secret + - name: KOMODO_OIDC_USE_FULL_EMAIL + value: "true" + image: ghcr.io/moghtech/komodo-core:1.19.5 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 128Mi + volumeMounts: + - mountPath: /repo-cache + name: cache + - mountPath: /syncs + name: syncs + volumes: + - name: cache + persistentVolumeClaim: + claimName: komodo-cache + - name: syncs + 
persistentVolumeClaim: + claimName: komodo-syncs +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/cluster.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: komodo-postgresql-17-fdb-cluster + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + instances: 3 + imageName: "ghcr.io/ferretdb/postgres-documentdb:17-0.106.0-ferretdb-2.5.0" + imagePullPolicy: IfNotPresent + postgresUID: 999 + postgresGID: 999 + plugins: + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: false + parameters: + barmanObjectName: "komodo-postgresql-17-fdb-external-backup" + serverName: "komodo-postgresql-17-fdb-backup-2" + - name: barman-cloud.cloudnative-pg.io + enabled: true + isWALArchiver: true + parameters: + barmanObjectName: "komodo-postgresql-17-fdb-garage-local-backup" + serverName: "komodo-postgresql-17-fdb-backup-1" + + externalClusters: + - name: recovery + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "komodo-postgresql-17-fdb-recovery" + serverName: komodo-postgresql-17-fdb-backup-1 + + storage: + size: 10Gi + storageClass: local-path + walStorage: + size: 2Gi + storageClass: local-path + resources: + limits: + hugepages-2Mi: 256Mi + requests: + cpu: 100m + memory: 256Mi + + affinity: + enablePodAntiAffinity: true + topologyKey: kubernetes.io/hostname + + primaryUpdateMethod: switchover + primaryUpdateStrategy: unsupervised + logLevel: info + enableSuperuserAccess: true + enablePDB: true + + postgresql: + shared_preload_libraries: + - pg_cron + - pg_documentdb_core + - pg_documentdb + pg_hba: + - host ferretDB postgres localhost trust + - host ferretDB ferret localhost trust + parameters: + cron.database_name: ferretDB + 
documentdb.enableBypassDocumentValidation: "true" + documentdb.enableCompact: "true" + documentdb.enableLetAndCollationForQueryMatch: "true" + documentdb.enableNowSystemVariable: "true" + documentdb.enableSchemaValidation: "true" + documentdb.enableSortbyIdPushDownToPrimaryKey: "true" + documentdb.enableUserCrud: "true" + documentdb.maxUserLimit: "100" + hot_standby_feedback: "on" + max_slot_wal_keep_size: 2000MB + shared_buffers: 128MB + + monitoring: + enablePodMonitor: true + disableDefaultQueries: false + + + + bootstrap: + initdb: + database: ferretDB + owner: ferret + postInitApplicationSQL: + - create extension if not exists pg_cron; + - create extension if not exists documentdb cascade; + - grant documentdb_admin_role to ferret; +--- +# Source: komodo/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: komodo-secret + namespace: komodo + labels: + app.kubernetes.io/name: komodo-secret + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: passkey + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/komodo/config + metadataPolicy: None + property: passkey + - secretKey: jwt + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/komodo/config + metadataPolicy: None + property: jwt + - secretKey: webhook + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/komodo/config + metadataPolicy: None + property: webhook + - secretKey: oidc-client-id + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/komodo + metadataPolicy: None + property: client + - secretKey: oidc-client-secret + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /authentik/oidc/komodo + metadataPolicy: None + property: secret +--- +# Source: komodo/templates/external-secret.yaml 
+apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: komodo-postgresql-17-fdb-cluster-backup-secret + namespace: komodo + labels: + app.kubernetes.io/name: komodo-postgresql-17-fdb-cluster-backup-secret + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: access + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/postgres-backups + metadataPolicy: None + property: secret +--- +# Source: komodo/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: komodo-postgresql-17-cluster-backup-secret-garage + namespace: komodo + labels: + app.kubernetes.io/name: komodo-postgresql-17-cluster-backup-secret-garage + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: ACCESS_SECRET_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_SECRET_KEY + - secretKey: ACCESS_REGION + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/postgres-backups + metadataPolicy: None + property: ACCESS_REGION +--- +# Source: komodo/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: https-route-komodo + namespace: komodo + labels: + app.kubernetes.io/name: https-route-komodo + 
app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - komodo.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: komodo-main + port: 80 + weight: 100 +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "komodo-postgresql-17-fdb-external-backup" + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 30d + configuration: + destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/komodo/komodo-postgresql-17-fdb-cluster + endpointURL: https://nyc3.digitaloceanspaces.com + s3Credentials: + accessKeyId: + name: komodo-postgresql-17-fdb-cluster-backup-secret + key: ACCESS_KEY_ID + secretAccessKey: + name: komodo-postgresql-17-fdb-cluster-backup-secret + key: ACCESS_SECRET_KEY +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "komodo-postgresql-17-fdb-garage-local-backup" + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + retentionPolicy: 3d + configuration: + destinationPath: s3://postgres-backups/cl01tl/komodo/komodo-postgresql-17-fdb-cluster + endpointURL: http://garage-main.garage:3900 + s3Credentials: + accessKeyId: + name:
komodo-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: komodo-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY + region: + name: komodo-postgresql-17-cluster-backup-secret-garage + key: ACCESS_REGION +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: "komodo-postgresql-17-fdb-recovery" + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + configuration: + destinationPath: s3://postgres-backups/cl01tl/komodo/komodo-postgresql-17-fdb-cluster + endpointURL: http://garage-main.garage:3900 + wal: + compression: snappy + maxParallel: 1 + data: + compression: snappy + jobs: 1 + s3Credentials: + accessKeyId: + name: komodo-postgresql-17-cluster-backup-secret-garage + key: ACCESS_KEY_ID + secretAccessKey: + name: komodo-postgresql-17-cluster-backup-secret-garage + key: ACCESS_SECRET_KEY +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/prometheus-rule.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: komodo-postgresql-17-fdb-alert-rules + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + groups: + - name: cloudnative-pg/komodo-postgresql-17-fdb + rules: + - alert: CNPGClusterBackendsWaitingWarning + annotations: + summary: CNPG Cluster a backend is waiting for longer than 5 minutes. 
+ description: |- + Pod {{ $labels.pod }} + has been waiting for longer than 5 minutes + expr: | + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterDatabaseDeadlockConflictsWarning + annotations: + summary: CNPG Cluster has over 10 deadlock conflicts. + description: |- + There are over 10 deadlock conflicts in + {{ $labels.pod }} + expr: | + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterHACritical + annotations: + summary: CNPG Cluster has no standby replicas! + description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe + risk of data loss and downtime if the primary instance fails. + + The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint + will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main. + + This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less + instances. The replaced instance may need some time to catch-up with the cluster primary instance. + + This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this + case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="komodo"} - cnpg_pg_replication_is_wal_receiver_up{namespace="komodo"}) < 1 + for: 5m + labels: + severity: critical + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterHAWarning + annotations: + summary: CNPG Cluster less than 2 standby replicas. 
+ description: |- + CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting + your cluster at risk if another instance fails. The cluster is still able to operate normally, although + the `-ro` and `-r` endpoints operate at reduced capacity. + + This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may + need some time to catch-up with the cluster primary instance. + + This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. + In this case you may want to silence it. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + expr: | + max by (job) (cnpg_pg_replication_streaming_replicas{namespace="komodo"} - cnpg_pg_replication_is_wal_receiver_up{namespace="komodo"}) < 2 + for: 5m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterHighConnectionsCritical + annotations: + summary: CNPG Instance maximum number of connections critical! + description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 100 > 95 + for: 5m + labels: + severity: critical + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterHighConnectionsWarning + annotations: + summary: CNPG Instance is approaching the maximum number of connections. 
+ description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of + the maximum number of connections. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + expr: | + sum by (pod) (cnpg_backends_total{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 100 > 80 + for: 5m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterHighReplicationLag + annotations: + summary: CNPG Cluster high replication lag + description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is experiencing a high replication lag of + {{`{{`}} $value {{`}}`}}ms. + + High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. + runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + expr: | + max(cnpg_pg_replication_lag{namespace="komodo",pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 1000 > 1000 + for: 5m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterInstancesOnSameNode + annotations: + summary: CNPG Cluster instances are located on the same node. + description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has {{`{{`}} $value {{`}}`}} + instances on the same node {{`{{`}} $labels.node {{`}}`}}. + + A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + expr: | + count by (node) (kube_pod_info{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) > 1 + for: 5m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterLongRunningTransactionWarning + annotations: + summary: CNPG Cluster query is taking longer than 5 minutes. + description: |- + CloudNativePG Cluster Pod {{ $labels.pod }} + is taking more than 5 minutes (300 seconds) for a query. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterLowDiskSpaceCritical + annotations: + summary: CNPG Instance is running out of disk space! + description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is running extremely low on disk space. Check attached PVCs! 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"})) > 0.9 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} + ) > 0.9 + for: 5m + labels: + severity: critical + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterLowDiskSpaceWarning + annotations: + summary: CNPG Instance is running out of disk space. + description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is running low on disk space. Check attached PVCs. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + expr: | + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"})) > 0.7 OR + max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR + max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"}) + / + sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"}) + * + on(namespace, persistentvolumeclaim) group_left(volume) + kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} + ) > 0.7 + for: 5m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster + - alert: CNPGClusterOffline + annotations: + summary: CNPG Cluster has no running instances! + description: |- + CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has no ready instances. + + Having an offline cluster means your applications will not be able to access the database, leading to + potential service disruption and/or data loss. 
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
+ expr: |
+ (count(cnpg_collector_up{namespace="komodo",pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
+ for: 5m
+ labels:
+ severity: critical
+ namespace: komodo
+ cnpg_cluster: komodo-postgresql-17-fdb-cluster
+ - alert: CNPGClusterPGDatabaseXidAgeWarning
+ annotations:
+ summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
+ description: |-
+ Over 300,000,000 transactions from frozen xid
+ on pod {{ $labels.pod }}
+ expr: |
+ cnpg_pg_database_xid_age > 300000000
+ for: 1m
+ labels:
+ severity: warning
+ namespace: komodo
+ cnpg_cluster: komodo-postgresql-17-fdb-cluster
+ - alert: CNPGClusterPGReplicationWarning
+ annotations:
+ summary: CNPG Cluster standby is lagging behind the primary.
+ description: |-
+ Standby is lagging behind by over 300 seconds (5 minutes)
+ expr: |
+ cnpg_pg_replication_lag > 300
+ for: 1m
+ labels:
+ severity: warning
+ namespace: komodo
+ cnpg_cluster: komodo-postgresql-17-fdb-cluster
+ - alert: CNPGClusterReplicaFailingReplicationWarning
+ annotations:
+ summary: CNPG Cluster has a replica that is failing to replicate.
+ description: |-
+ Replica {{ $labels.pod }}
+ is failing to replicate
+ expr: |
+ cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
+ for: 1m
+ labels:
+ severity: warning
+ namespace: komodo
+ cnpg_cluster: komodo-postgresql-17-fdb-cluster
+ - alert: CNPGClusterZoneSpreadWarning
+ annotations:
+ summary: CNPG Cluster instances in the same zone.
+ description: |-
+ CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has instances in the same availability zone.
+
+ A disaster in one availability zone will lead to a potential service disruption and/or data loss.
+ runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + expr: | + 3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + for: 5m + labels: + severity: warning + namespace: komodo + cnpg_cluster: komodo-postgresql-17-fdb-cluster +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "komodo-postgresql-17-fdb-daily-backup-scheduled-backup" + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: false + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: komodo-postgresql-17-fdb-cluster + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "komodo-postgresql-17-fdb-external-backup" +--- +# Source: komodo/charts/postgresql-17-fdb-cluster/templates/scheduled-backup.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: "komodo-postgresql-17-fdb-live-backup-scheduled-backup" + namespace: komodo + labels: + helm.sh/chart: postgresql-17-fdb-cluster-6.16.0 + app.kubernetes.io/name: komodo-postgresql-17-fdb + app.kubernetes.io/instance: komodo + app.kubernetes.io/part-of: komodo + app.kubernetes.io/version: "6.16.0" + app.kubernetes.io/managed-by: Helm +spec: + immediate: true + suspend: false + schedule: "0 0 0 * * *" + backupOwnerReference: self + cluster: + name: komodo-postgresql-17-fdb-cluster + method: plugin + pluginConfiguration: + name: 
barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: "komodo-postgresql-17-fdb-garage-local-backup" diff --git a/clusters/cl01tl/manifests/kronic/kronic.yaml b/clusters/cl01tl/manifests/kronic/kronic.yaml new file mode 100644 index 000000000..4784330f3 --- /dev/null +++ b/clusters/cl01tl/manifests/kronic/kronic.yaml @@ -0,0 +1,228 @@ +--- +# Source: kronic/charts/kronic/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kronic + labels: + helm.sh/chart: kronic-0.1.7 + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/version: "v0.1.4" + app.kubernetes.io/managed-by: Helm +--- +# Source: kronic/charts/kronic/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: kronic-0.1.7 + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/version: "v0.1.4" + app.kubernetes.io/managed-by: Helm + name: kronic +rules: + - apiGroups: + - "" + resources: + - pods + - events + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + - cronjobs + - cronjobs/status + verbs: + - "*" +--- +# Source: kronic/charts/kronic/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: kronic-0.1.7 + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/version: "v0.1.4" + app.kubernetes.io/managed-by: Helm + name: kronic +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kronic +subjects: + - kind: ServiceAccount + name: kronic + namespace: "kronic" +--- +# Source: kronic/charts/kronic/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kronic + labels: + helm.sh/chart: kronic-0.1.7 + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/version: "v0.1.4" + 
app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic +--- +# Source: kronic/charts/kronic/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kronic + labels: + helm.sh/chart: kronic-0.1.7 + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/version: "v0.1.4" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + template: + metadata: + labels: + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + spec: + serviceAccountName: kronic + securityContext: + {} + containers: + - name: kronic + securityContext: + {} + image: "ghcr.io/mshade/kronic:v0.1.4" + imagePullPolicy: IfNotPresent + env: + - name: KRONIC_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KRONIC_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: kronic-config-secret + key: password + - name: KRONIC_ADMIN_USERNAME + value: "kronic" + - name: KRONIC_ALLOW_NAMESPACES + value: "gitea,vault,talos,libation,kubernetes-cloudflare-ddns" + - name: KRONIC_NAMESPACE_ONLY + value: "" + ports: + - name: http + containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: http + readinessProbe: + httpGet: + path: /healthz + port: http + resources: + limits: + cpu: 1 + memory: 1024Mi + requests: + cpu: 10m + memory: 256Mi +--- +# Source: kronic/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: kronic-config-secret + namespace: kronic + labels: + app.kubernetes.io/name: kronic-config-secret + app.kubernetes.io/instance: kronic + app.kubernetes.io/part-of: kronic +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: password + 
remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/kronic/auth + metadataPolicy: None + property: password +--- +# Source: kronic/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: https-route-kronic + namespace: kronic + labels: + app.kubernetes.io/name: https-route-kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/part-of: kronic +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - kronic.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: kronic + port: 80 + weight: 100 +--- +# Source: kronic/charts/kronic/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "kronic-test-connection" + labels: + helm.sh/chart: kronic-0.1.7 + app.kubernetes.io/name: kronic + app.kubernetes.io/instance: kronic + app.kubernetes.io/version: "v0.1.4" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['kronic:80/healthz'] + restartPolicy: Never diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/kube-prometheus-stack.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/kube-prometheus-stack.yaml new file mode 100644 index 000000000..2a87c1ff9 --- /dev/null +++ b/clusters/cl01tl/manifests/kube-prometheus-stack/kube-prometheus-stack.yaml @@ -0,0 +1,5728 @@ +--- +# Source: kube-prometheus-stack/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: kube-prometheus-stack + labels: + app.kubernetes.io/name: kube-prometheus-stack + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged 
+--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "2.17.0" + release: kube-prometheus-stack + name: kube-prometheus-stack-kube-state-metrics + namespace: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-prometheus-stack-prometheus-node-exporter + namespace: kube-prometheus-stack + labels: + helm.sh/chart: prometheus-node-exporter-4.49.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: prometheus-node-exporter + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "1.10.2" + release: kube-prometheus-stack +automountServiceAccountToken: false +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/alertmanager/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-prometheus-stack-alertmanager + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-alertmanager + app.kubernetes.io/name: kube-prometheus-stack-alertmanager + app.kubernetes.io/component: alertmanager + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" 
+automountServiceAccountToken: true +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-prometheus-stack-operator + namespace: kube-prometheus-stack + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +automountServiceAccountToken: true +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-prometheus-stack-prometheus + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-prometheus + app.kubernetes.io/name: kube-prometheus-stack-prometheus + app.kubernetes.io/component: prometheus + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +automountServiceAccountToken: true +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/alertmanager/secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: alertmanager-kube-prometheus-stack-alertmanager + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-alertmanager + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: 
"Helm" +data: + alertmanager.yaml: "Z2xvYmFsOgogIHJlc29sdmVfdGltZW91dDogNW0KaW5oaWJpdF9ydWxlczoKLSBlcXVhbDoKICAtIG5hbWVzcGFjZQogIC0gYWxlcnRuYW1lCiAgc291cmNlX21hdGNoZXJzOgogIC0gc2V2ZXJpdHkgPSBjcml0aWNhbAogIHRhcmdldF9tYXRjaGVyczoKICAtIHNldmVyaXR5ID1+IHdhcm5pbmd8aW5mbwotIGVxdWFsOgogIC0gbmFtZXNwYWNlCiAgLSBhbGVydG5hbWUKICBzb3VyY2VfbWF0Y2hlcnM6CiAgLSBzZXZlcml0eSA9IHdhcm5pbmcKICB0YXJnZXRfbWF0Y2hlcnM6CiAgLSBzZXZlcml0eSA9IGluZm8KLSBlcXVhbDoKICAtIG5hbWVzcGFjZQogIHNvdXJjZV9tYXRjaGVyczoKICAtIGFsZXJ0bmFtZSA9IEluZm9JbmhpYml0b3IKICB0YXJnZXRfbWF0Y2hlcnM6CiAgLSBzZXZlcml0eSA9IGluZm8KLSB0YXJnZXRfbWF0Y2hlcnM6CiAgLSBhbGVydG5hbWUgPSBJbmZvSW5oaWJpdG9yCnJlY2VpdmVyczoKLSBuYW1lOiBwdXNob3ZlcgogIHB1c2hvdmVyX2NvbmZpZ3M6CiAgLSBzZW5kX3Jlc29sdmVkOiB0cnVlCiAgICB0b2tlbl9maWxlOiAvZXRjL2FsZXJ0bWFuYWdlci9zZWNyZXRzL2FsZXJ0bWFuYWdlci1jb25maWctc2VjcmV0L3B1c2hvdmVyX3Rva2VuCiAgICB1c2VyX2tleV9maWxlOiAvZXRjL2FsZXJ0bWFuYWdlci9zZWNyZXRzL2FsZXJ0bWFuYWdlci1jb25maWctc2VjcmV0L3B1c2hvdmVyX3VzZXJfa2V5Ci0gbmFtZTogbnRmeQogIHdlYmhvb2tfY29uZmlnczoKICAtIGh0dHBfY29uZmlnOgogICAgICBiYXNpY19hdXRoOgogICAgICAgIHBhc3N3b3JkX2ZpbGU6IC9ldGMvYWxlcnRtYW5hZ2VyL3NlY3JldHMvYWxlcnRtYW5hZ2VyLWNvbmZpZy1zZWNyZXQvbnRmeV9wYXNzd29yZAogICAgICAgIHVzZXJuYW1lOiBudGZ5LWFsZXJ0bWFuYWdlcgogICAgdXJsOiBodHRwOi8vbnRmeS1hbGVydG1hbmFnZXIua3ViZS1wcm9tZXRoZXVzLXN0YWNrOjgwCnJvdXRlOgogIGdyb3VwX2J5OgogIC0gbmFtZXNwYWNlCiAgLSBhbGVydG5hbWUKICBncm91cF9pbnRlcnZhbDogNW0KICBncm91cF93YWl0OiAzMHMKICByZWNlaXZlcjogbnRmeQogIHJlcGVhdF9pbnRlcnZhbDogMjRoCiAgcm91dGVzOgogIC0gZ3JvdXBfaW50ZXJ2YWw6IDVtCiAgICBncm91cF93YWl0OiAxMHMKICAgIHJlY2VpdmVyOiBudGZ5CiAgICByZXBlYXRfaW50ZXJ2YWw6IDI0aAp0ZW1wbGF0ZXM6Ci0gL2V0Yy9hbGVydG1hbmFnZXIvY29uZmlnLyoudG1wbA==" +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + 
app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "2.17.0" + release: kube-prometheus-stack + name: kube-prometheus-stack-kube-state-metrics +rules: + +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] + +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] + +- apiGroups: ["apps"] + resources: + - daemonsets + verbs: ["list", "watch"] + +- apiGroups: ["apps"] + resources: + - deployments + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] + +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] + +- apiGroups: ["networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] + +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] + +- apiGroups: ["coordination.k8s.io"] + resources: + - leases + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] + +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] + +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] + +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] + +- apiGroups: ["apps"] + resources: + - replicasets + verbs: ["list", "watch"] + +- 
apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] + +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] + +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] + +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] + +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] + +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/aggregate-clusterroles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-prometheus-stack-prometheus-crd-view + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +rules: +- apiGroups: ["monitoring.coreos.com"] + resources: ["alertmanagers", "alertmanagerconfigs", "podmonitors", "probes", "prometheuses", "prometheusagents", "prometheusrules", "scrapeconfigs", "servicemonitors"] + verbs: ["get", "list", "watch"] +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/aggregate-clusterroles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 
+metadata: + name: kube-prometheus-stack-prometheus-crd-edit + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +rules: +- apiGroups: ["monitoring.coreos.com"] + resources: ["alertmanagers", "alertmanagerconfigs", "podmonitors", "probes", "prometheuses", "prometheusagents", "prometheusrules", "scrapeconfigs", "servicemonitors"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-prometheus-stack-operator + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - alertmanagers/finalizers + - alertmanagers/status + - alertmanagerconfigs + - prometheuses + - prometheuses/finalizers + - prometheuses/status + - prometheusagents + - prometheusagents/finalizers + - prometheusagents/status + - thanosrulers + - thanosrulers/finalizers + - thanosrulers/status + - scrapeconfigs + - scrapeconfigs/status + - 
servicemonitors + - servicemonitors/status + - podmonitors + - podmonitors/status + - probes + - probes/status + - prometheusrules + verbs: + - '*' +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - '*' +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - delete +- apiGroups: + - "" + resources: + - services + - services/finalizers + - endpoints + verbs: + - get + - create + - update + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - patch + - create +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - create + - list + - watch + - update + - delete +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-prometheus-stack-prometheus + labels: + app: kube-prometheus-stack-prometheus + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +rules: +# These permissions (to examine all namespaces) are not in the kube-prometheus repo. +# They're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml +# kube-prometheus deliberately defaults to a more restrictive setup that is not appropriate for our general audience. 
+- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: ["get", "list", "watch"] +- apiGroups: + - "networking.k8s.io" + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics", "/metrics/cadvisor"] + verbs: ["get"] +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "2.17.0" + release: kube-prometheus-stack + name: kube-prometheus-stack-kube-state-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-prometheus-stack-kube-state-metrics +subjects: +- kind: ServiceAccount + name: kube-prometheus-stack-kube-state-metrics + namespace: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-prometheus-stack-operator + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
kube-prometheus-stack-operator +subjects: +- kind: ServiceAccount + name: kube-prometheus-stack-operator + namespace: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-prometheus-stack-prometheus + labels: + app: kube-prometheus-stack-prometheus + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-prometheus-stack-prometheus +subjects: + - kind: ServiceAccount + name: kube-prometheus-stack-prometheus + namespace: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-kube-state-metrics + namespace: kube-prometheus-stack + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "2.17.0" + release: kube-prometheus-stack + annotations: +spec: + type: "ClusterIP" + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: http + + selector: + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-prometheus-node-exporter + namespace: kube-prometheus-stack + labels: + 
helm.sh/chart: prometheus-node-exporter-4.49.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: prometheus-node-exporter + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "1.10.2" + release: kube-prometheus-stack + jobLabel: node-exporter + annotations: + prometheus.io/scrape: "true" +spec: + type: ClusterIP + ports: + - port: 9100 + targetPort: 9100 + protocol: TCP + name: http-metrics + selector: + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/alertmanager/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-alertmanager + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-alertmanager + self-monitor: "true" + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + ports: + - name: http-web + port: 9093 + targetPort: 9093 + protocol: TCP + - name: reloader-web + appProtocol: http + port: 8080 + targetPort: reloader-web + selector: + app.kubernetes.io/name: alertmanager + alertmanager: kube-prometheus-stack-alertmanager + sessionAffinity: None + type: "ClusterIP" +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/exporters/core-dns/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-coredns + labels: + app: kube-prometheus-stack-coredns + jobLabel: coredns + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 
+ release: "kube-prometheus-stack" + heritage: "Helm" + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/exporters/kube-etcd/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-kube-etcd + labels: + app: kube-prometheus-stack-kube-etcd + jobLabel: kube-etcd + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: 2381 + protocol: TCP + targetPort: 2381 + selector: + k8s-app: kube-controller-manager # NOTE(review): etcd Service selects controller-manager pods — if intentional (hostNetwork static pods resolve control-plane node IPs so Prometheus scrapes host etcd :2381), keep; otherwise the chart default is `component: etcd` + type: ClusterIP +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-operator + namespace: kube-prometheus-stack + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +spec: + ports: + - name: https + port: 443 + targetPort: https + selector: + app: kube-prometheus-stack-operator + release: "kube-prometheus-stack" + type: "ClusterIP" +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: kube-prometheus-stack-prometheus + namespace: kube-prometheus-stack
+ labels: + app: kube-prometheus-stack-prometheus + self-monitor: "true" + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + ports: + - name: http-web + port: 9090 + targetPort: 9090 + - name: reloader-web + appProtocol: http + port: 8080 + targetPort: reloader-web + publishNotReadyAddresses: false + selector: + app.kubernetes.io/name: prometheus + operator.prometheus.io/name: kube-prometheus-stack-prometheus + sessionAffinity: None + type: "ClusterIP" +--- +# Source: kube-prometheus-stack/charts/ntfy-alertmanager/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: ntfy-alertmanager + labels: + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: kube-prometheus-stack + app.kubernetes.io/service: ntfy-alertmanager + helm.sh/chart: ntfy-alertmanager-4.4.0 + namespace: kube-prometheus-stack +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/name: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: node-ps10rp + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: node-ps10rp + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack + annotations: + tailscale.com/tailnet-fqdn: node-exporter-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: kube-prometheus-stack/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: garage-ps10rp + namespace: kube-prometheus-stack + labels: + 
app.kubernetes.io/name: garage-ps10rp + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack + annotations: + tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net +spec: + externalName: placeholder + type: ExternalName +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-prometheus-stack-prometheus-node-exporter + namespace: kube-prometheus-stack + labels: + helm.sh/chart: prometheus-node-exporter-4.49.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: prometheus-node-exporter + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "1.10.2" + release: kube-prometheus-stack +spec: + selector: + matchLabels: + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + revisionHistoryLimit: 10 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + helm.sh/chart: prometheus-node-exporter-4.49.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: prometheus-node-exporter + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "1.10.2" + release: kube-prometheus-stack + jobLabel: node-exporter + spec: + automountServiceAccountToken: false + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: kube-prometheus-stack-prometheus-node-exporter + containers: + - name: node-exporter + image: quay.io/prometheus/node-exporter:v1.10.2 + imagePullPolicy: IfNotPresent + args: + - --path.procfs=/host/proc 
+ - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --path.udev.data=/host/root/run/udev/data + - --web.listen-address=[$(HOST_IP)]:9100 + - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/) + - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$ + securityContext: + readOnlyRootFilesystem: true + env: + - name: HOST_IP + value: 0.0.0.0 + ports: + - name: http-metrics + containerPort: 9100 + protocol: TCP + livenessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: + path: / + port: http-metrics + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: + path: / + port: http-metrics + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + - name: root + mountPath: /host/root + mountPropagation: HostToContainer + readOnly: true + hostNetwork: true + hostPID: true + hostIPC: false + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + - key: type + operator: NotIn + values: + - virtual-kubelet + nodeSelector: + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + - name: root + hostPath: + path: / +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: kube-prometheus-stack-kube-state-metrics + namespace: kube-prometheus-stack + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "2.17.0" + release: kube-prometheus-stack +spec: + selector: + matchLabels: + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + replicas: 1 + strategy: + type: RollingUpdate + revisionHistoryLimit: 10 + template: + metadata: + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "2.17.0" + release: kube-prometheus-stack + spec: + automountServiceAccountToken: true + hostNetwork: false + serviceAccountName: kube-prometheus-stack-kube-state-metrics + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + dnsPolicy: ClusterFirst + containers: + - name: kube-state-metrics + args: + - --port=8080 + - --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments + imagePullPolicy: IfNotPresent + image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.17.0 + ports: + - containerPort: 8080 + name: http + livenessProbe: + failureThreshold: 3 + httpGet: + 
httpHeaders: + path: /livez + port: 8080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: + path: /readyz + port: 8081 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: + {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-prometheus-stack-operator + namespace: kube-prometheus-stack + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: kube-prometheus-stack-operator + release: "kube-prometheus-stack" + template: + metadata: + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator + spec: + containers: + - name: kube-prometheus-stack + image: "quay.io/prometheus-operator/prometheus-operator:v0.86.2" + imagePullPolicy: "IfNotPresent" + args: + - --kubelet-service=kube-system/kube-prometheus-stack-kubelet + - 
--kubelet-endpoints=true + - --kubelet-endpointslice=false + - --localhost=127.0.0.1 + - --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.86.2 + - --config-reloader-cpu-request=0 + - --config-reloader-cpu-limit=0 + - --config-reloader-memory-request=0 + - --config-reloader-memory-limit=0 + - --thanos-default-base-image=quay.io/thanos/thanos:v0.40.1 + - --secret-field-selector=type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1 + - --web.enable-tls=true + - --web.cert-file=/cert/cert + - --web.key-file=/cert/key + - --web.listen-address=:10250 + - --web.tls-min-version=VersionTLS13 + ports: + - containerPort: 10250 + name: https + env: + - name: GOGC + value: "30" + resources: + {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: tls-secret + mountPath: /cert + readOnly: true + readinessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + volumes: + - name: tls-secret + secret: + defaultMode: 420 + secretName: kube-prometheus-stack-admission + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + serviceAccountName: kube-prometheus-stack-operator + automountServiceAccountToken: true + terminationGracePeriodSeconds: 30 +--- +# Source: kube-prometheus-stack/charts/ntfy-alertmanager/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ntfy-alertmanager + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/managed-by: 
Helm + app.kubernetes.io/name: kube-prometheus-stack + helm.sh/chart: ntfy-alertmanager-4.4.0 + namespace: kube-prometheus-stack +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: kube-prometheus-stack + app.kubernetes.io/instance: kube-prometheus-stack + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/name: kube-prometheus-stack + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - image: xenrox/ntfy-alertmanager:0.5.0 + imagePullPolicy: IfNotPresent + name: main + volumeMounts: + - mountPath: /etc/ntfy-alertmanager/config + mountPropagation: None + name: config + readOnly: true + subPath: config + volumes: + - name: config + secret: + secretName: ntfy-alertmanager-config-secret +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/alertmanager/alertmanager.yaml +apiVersion: monitoring.coreos.com/v1 +kind: Alertmanager +metadata: + name: kube-prometheus-stack-alertmanager + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-alertmanager + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + image: "quay.io/prometheus/alertmanager:v0.29.0" + imagePullPolicy: "IfNotPresent" + version: v0.29.0 + replicas: 1 + listenLocal: false + serviceAccountName: kube-prometheus-stack-alertmanager + automountServiceAccountToken: true + externalUrl: http://kube-prometheus-stack-alertmanager.kube-prometheus-stack:9093 + paused: false + logFormat: "logfmt" + logLevel: 
"info" + retention: "120h" + secrets: + - alertmanager-config-secret + alertmanagerConfigSelector: {} + alertmanagerConfigNamespaceSelector: + {} + routePrefix: "/" + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [alertmanager]} + - {key: alertmanager, operator: In, values: [kube-prometheus-stack-alertmanager]} + portName: http-web +--- +# Source: kube-prometheus-stack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: alertmanager-config-secret + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: alertmanager-config-secret + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: pushover_token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /pushover/key + metadataPolicy: None + property: alertmanager_key + - secretKey: pushover_user_key + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /pushover/key + metadataPolicy: None + property: user_key + - secretKey: ntfy_password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/kube-prometheus-stack/ntfy-alertmanager + metadataPolicy: None + property: ntfy_password +--- +# Source: kube-prometheus-stack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: garage-metric-secret + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: garage-metric-secret + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: 
kube-prometheus-stack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: token + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/token + metadataPolicy: None + property: metric +--- +# Source: kube-prometheus-stack/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: ntfy-alertmanager-config-secret + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: ntfy-alertmanager-config-secret + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: ntfy_password + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/kube-prometheus-stack/ntfy-alertmanager + metadataPolicy: None + property: ntfy_password + - secretKey: config + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/kube-prometheus-stack/ntfy-alertmanager + metadataPolicy: None + property: config +--- +# Source: kube-prometheus-stack/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-prometheus + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: http-route-prometheus + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - prometheus.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: prometheus-operated + port: 9090 + weight: 100 +--- +# Source: kube-prometheus-stack/templates/http-route.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: http-route-alertmanager + namespace: kube-prometheus-stack + labels: + 
app.kubernetes.io/name: http-route-alertmanager + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + parentRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: traefik-gateway + namespace: traefik + hostnames: + - alertmanager.alexlebens.net + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - group: '' + kind: Service + name: kube-prometheus-stack-alertmanager + port: 9093 + weight: 100 +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: kube-prometheus-stack-admission + annotations: + + argocd.argoproj.io/hook: PreSync + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +webhooks: + - name: prometheusrulemutate.monitoring.coreos.com + failurePolicy: Ignore + rules: + - apiGroups: + - monitoring.coreos.com + apiVersions: + - "*" + resources: + - prometheusrules + operations: + - CREATE + - UPDATE + clientConfig: + service: + namespace: kube-prometheus-stack + name: kube-prometheus-stack-operator + path: /admission-prometheusrules/mutate + timeoutSeconds: 10 + admissionReviewVersions: ["v1", "v1beta1"] + sideEffects: None +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/prometheus.yaml +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: kube-prometheus-stack-prometheus + namespace: kube-prometheus-stack + labels: + app: 
kube-prometheus-stack-prometheus + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + automountServiceAccountToken: true + alerting: + alertmanagers: + - namespace: kube-prometheus-stack + name: kube-prometheus-stack-alertmanager + port: http-web + pathPrefix: "/" + apiVersion: v2 + image: "quay.io/prometheus/prometheus:v3.7.3" + imagePullPolicy: "IfNotPresent" + version: v3.7.3 + externalUrl: "https://prometheus.alexlebens.net" + paused: false + replicas: 1 + shards: 1 + logLevel: "info" + logFormat: logfmt + listenLocal: false + enableOTLPReceiver: false + enableAdminAPI: false + scrapeInterval: 30s + retention: "30d" + tsdb: + outOfOrderTimeWindow: 0s + walCompression: true + routePrefix: "/" + serviceAccountName: kube-prometheus-stack-prometheus + serviceMonitorSelector: {} + serviceMonitorNamespaceSelector: {} + podMonitorSelector: {} + podMonitorNamespaceSelector: {} + probeSelector: + matchLabels: + release: "kube-prometheus-stack" + + probeNamespaceSelector: {} + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + ruleNamespaceSelector: {} + ruleSelector: {} + scrapeConfigSelector: {} + scrapeConfigNamespaceSelector: {} + storage: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: synology-iscsi-delete + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [prometheus]} + - {key: app.kubernetes.io/instance, operator: In, values: [kube-prometheus-stack-prometheus]} + portName: http-web + 
hostNetwork: false +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/alertmanager.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-alertmanager.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: alertmanager.rules + rules: + - alert: AlertmanagerFailedReload + annotations: + description: Configuration has failed to load for {{ $labels.namespace }}/{{ $labels.pod}}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedreload + summary: Reloading an Alertmanager configuration has failed. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(alertmanager_config_last_reload_successful{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[5m]) == 0 + for: 10m + labels: + severity: critical + - alert: AlertmanagerMembersInconsistent + annotations: + description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only found {{ $value }} members of the {{$labels.job}} cluster. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagermembersinconsistent + summary: A member of an Alertmanager cluster has not found all other cluster members. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ max_over_time(alertmanager_cluster_members{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[5m]) + < on (namespace,service,cluster) group_left + count by (namespace,service,cluster) (max_over_time(alertmanager_cluster_members{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[5m])) + for: 15m + labels: + severity: critical + - alert: AlertmanagerFailedToSendAlerts + annotations: + description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedtosendalerts + summary: An Alertmanager instance failed to send notifications. + expr: |- + ( + rate(alertmanager_notifications_failed_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[15m]) + / + ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[15m]) + ) + > 0.01 + for: 5m + labels: + severity: warning + - alert: AlertmanagerClusterFailedToSendAlerts + annotations: + description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts + summary: All Alertmanager instances in a cluster failed to send notifications to a critical integration. 
+ expr: |- + min by (namespace,service, integration) ( + rate(alertmanager_notifications_failed_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration=~`.*`}[15m]) + / + ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration=~`.*`}[15m]) + ) + > 0.01 + for: 5m + labels: + severity: critical + - alert: AlertmanagerClusterFailedToSendAlerts + annotations: + description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts + summary: All Alertmanager instances in a cluster failed to send notifications to a non-critical integration. + expr: |- + min by (namespace,service, integration) ( + rate(alertmanager_notifications_failed_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration!~`.*`}[15m]) + / + ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration!~`.*`}[15m]) + ) + > 0.01 + for: 5m + labels: + severity: warning + - alert: AlertmanagerConfigInconsistent + annotations: + description: Alertmanager instances within the {{$labels.job}} cluster have different configurations. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerconfiginconsistent + summary: Alertmanager instances within the same cluster have different configurations. 
+ expr: |- + count by (namespace,service,cluster) ( + count_values by (namespace,service,cluster) ("config_hash", alertmanager_config_hash{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}) + ) + != 1 + for: 20m + labels: + severity: critical + - alert: AlertmanagerClusterDown + annotations: + description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have been up for less than half of the last 5m.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterdown + summary: Half or more of the Alertmanager instances within the same cluster are down. + expr: |- + ( + count by (namespace,service,cluster) ( + avg_over_time(up{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[5m]) < 0.5 + ) + / + count by (namespace,service,cluster) ( + up{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"} + ) + ) + >= 0.5 + for: 5m + labels: + severity: critical + - alert: AlertmanagerClusterCrashlooping + annotations: + description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have restarted at least 5 times in the last 10m.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclustercrashlooping + summary: Half or more of the Alertmanager instances within the same cluster are crashlooping. 
+ expr: |- + ( + count by (namespace,service,cluster) ( + changes(process_start_time_seconds{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"}[10m]) > 4 + ) + / + count by (namespace,service,cluster) ( + up{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack"} + ) + ) + >= 0.5 + for: 5m + labels: + severity: critical +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/config-reloaders.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-config-reloaders + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: config-reloaders + rules: + - alert: ConfigReloaderSidecarErrors + annotations: + description: 'Errors encountered while the {{$labels.pod}} config-reloader sidecar attempts to sync config in {{$labels.namespace}} namespace. + + As a result, configuration for service running in {{$labels.pod}} may be stale and cannot be updated anymore.' 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/configreloadersidecarerrors + summary: config-reloader sidecar has not had a successful reload for 10m + expr: max_over_time(reloader_last_reload_successful{namespace=~".+"}[5m]) == 0 + for: 10m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/etcd.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-etcd + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: etcd + rules: + - alert: etcdMembersDown + annotations: + description: 'etcd cluster "{{ $labels.job }}": members are down ({{ $value }}).' + summary: etcd cluster members are down. + expr: |- + max without (endpoint) ( + sum without (instance, pod) (up{job=~".*etcd.*"} == bool 0) + or + count without (To) ( + sum without (instance, pod) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01 + ) + ) + > 0 + for: 20m + labels: + severity: warning + - alert: etcdInsufficientMembers + annotations: + description: 'etcd cluster "{{ $labels.job }}": insufficient members ({{ $value }}).' + summary: etcd cluster has insufficient number of members. + expr: sum(up{job=~".*etcd.*"} == bool 1) without (instance, pod) < ((count(up{job=~".*etcd.*"}) without (instance, pod) + 1) / 2) + for: 3m + labels: + severity: critical + - alert: etcdNoLeader + annotations: + description: 'etcd cluster "{{ $labels.job }}": member {{ $labels.instance }} has no leader.' + summary: etcd cluster has no leader. 
+ expr: etcd_server_has_leader{job=~".*etcd.*"} == 0 + for: 1m + labels: + severity: critical + - alert: etcdHighNumberOfLeaderChanges + annotations: + description: 'etcd cluster "{{ $labels.job }}": {{ $value }} leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.' + summary: etcd cluster has high number of leader changes. + expr: increase((max without (instance, pod) (etcd_server_leader_changes_seen_total{job=~".*etcd.*"}) or 0*absent(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}))[15m:1m]) >= 4 + for: 5m + labels: + severity: warning + - alert: etcdHighNumberOfFailedGRPCRequests + annotations: + description: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' + summary: etcd cluster has high number of failed grpc requests. + expr: |- + 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) + / + sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code) + > 1 + for: 10m + labels: + severity: warning + - alert: etcdHighNumberOfFailedGRPCRequests + annotations: + description: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' + summary: etcd cluster has high number of failed grpc requests. 
+ expr: |- + 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) + / + sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code) + > 5 + for: 5m + labels: + severity: critical + - alert: etcdGRPCRequestsSlow + annotations: + description: 'etcd cluster "{{ $labels.job }}": 99th percentile of gRPC requests is {{ $value }}s on etcd instance {{ $labels.instance }} for {{ $labels.grpc_method }} method.' + summary: etcd grpc requests are slow + expr: |- + histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) + > 0.15 + for: 10m + labels: + severity: critical + - alert: etcdMemberCommunicationSlow + annotations: + description: 'etcd cluster "{{ $labels.job }}": member communication with {{ $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.' + summary: etcd cluster member communication is slow. + expr: |- + histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.15 + for: 10m + labels: + severity: warning + - alert: etcdHighNumberOfFailedProposals + annotations: + description: 'etcd cluster "{{ $labels.job }}": {{ $value }} proposal failures within the last 30 minutes on etcd instance {{ $labels.instance }}.' + summary: etcd cluster has high number of proposal failures. + expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5 + for: 15m + labels: + severity: warning + - alert: etcdHighFsyncDurations + annotations: + description: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' + summary: etcd cluster 99th percentile fsync durations are too high. 
+ expr: |- + histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.5 + for: 10m + labels: + severity: warning + - alert: etcdHighFsyncDurations + annotations: + description: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' + summary: etcd cluster 99th percentile fsync durations are too high. + expr: |- + histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 1 + for: 10m + labels: + severity: critical + - alert: etcdHighCommitDurations + annotations: + description: 'etcd cluster "{{ $labels.job }}": 99th percentile commit durations {{ $value }}s on etcd instance {{ $labels.instance }}.' + summary: etcd cluster 99th percentile commit durations are too high. + expr: |- + histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.25 + for: 10m + labels: + severity: warning + - alert: etcdDatabaseQuotaLowSpace + annotations: + description: 'etcd cluster "{{ $labels.job }}": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.' + summary: etcd cluster database is running full. + expr: (last_over_time(etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"}[5m]) / last_over_time(etcd_server_quota_backend_bytes{job=~".*etcd.*"}[5m]))*100 > 95 + for: 10m + labels: + severity: critical + - alert: etcdExcessiveDatabaseGrowth + annotations: + description: 'etcd cluster "{{ $labels.job }}": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.' + summary: etcd cluster database growing very fast. 
+ expr: predict_linear(etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"}[4h], 4*60*60) > etcd_server_quota_backend_bytes{job=~".*etcd.*"} + for: 10m + labels: + severity: warning + - alert: etcdDatabaseHighFragmentationRatio + annotations: + description: 'etcd cluster "{{ $labels.job }}": database size in use on instance {{ $labels.instance }} is {{ $value | humanizePercentage }} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.' + runbook_url: https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation + summary: etcd database size in use is less than 50% of the actual allocated storage. + expr: (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes{job=~".*etcd.*"}[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"}[5m])) < 0.5 and etcd_mvcc_db_total_size_in_use_in_bytes{job=~".*etcd.*"} > 104857600 + for: 10m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/general.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-general.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: general.rules + rules: + - alert: TargetDown + annotations: + description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/targetdown + summary: One or more targets are unreachable. 
+ expr: 100 * (count(up == 0) BY (cluster, job, namespace, service) / count(up) BY (cluster, job, namespace, service)) > 10 + for: 10m + labels: + severity: warning + - alert: Watchdog + annotations: + description: 'This is an alert meant to ensure that the entire alerting pipeline is functional. + + This alert is always firing, therefore it should always be firing in Alertmanager + + and always fire against a receiver. There are integrations with various notification + + mechanisms that send a notification when this alert is not firing. For example the + + "DeadMansSnitch" integration in PagerDuty. + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/watchdog + summary: An alert that should always be firing to certify that Alertmanager is working properly. + expr: vector(1) + labels: + severity: none + - alert: InfoInhibitor + annotations: + description: 'This is an alert that is used to inhibit info alerts. + + By themselves, the info-level alerts are sometimes very noisy, but they are relevant when combined with + + other alerts. + + This alert fires whenever there''s a severity="info" alert, and stops firing when another alert with a + + severity of ''warning'' or ''critical'' starts firing on the same namespace. + + This alert should be routed to a null receiver and configured to inhibit alerts with severity="info". + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/infoinhibitor + summary: Info-level alert inhibition. 
+ expr: ALERTS{severity = "info"} == 1 unless on (namespace) ALERTS{alertname != "InfoInhibitor", severity =~ "warning|critical", alertstate="firing"} == 1 + labels: + severity: none +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_cpu_usage_seconds_total.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-k8s.rules.container-cpu-usage-seconds-tot + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.container_cpu_usage_seconds_total + rules: + - expr: |- + sum by (cluster, namespace, pod, container) ( + rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) + ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( + 1, max by (cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate5m + - expr: |- + sum by (cluster, namespace, pod, container) ( + irate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) + ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( + 1, max by (cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_cache.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-k8s.rules.container-memory-cache + 
namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.container_memory_cache + rules: + - expr: |- + container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1, + max by (cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_cache +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_rss.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-k8s.rules.container-memory-rss + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.container_memory_rss + rules: + - expr: |- + container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1, + max by (cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_rss +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_swap.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-k8s.rules.container-memory-swap + 
namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.container_memory_swap + rules: + - expr: |- + container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1, + max by (cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_swap +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_working_set_bytes.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-k8s.rules.container-memory-working-set-by + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.container_memory_working_set_bytes + rules: + - expr: |- + container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1, + max by (cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_working_set_bytes +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_resource.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + 
name: kube-prometheus-stack-k8s.rules.container-resource + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.container_resource + rules: + - expr: |- + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} + ) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_memory:kube_pod_container_resource_requests:sum + - expr: |- + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} + ) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_cpu:kube_pod_container_resource_requests:sum + - expr: |- + 
kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_memory:active:kube_pod_container_resource_limits + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} + ) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_memory:kube_pod_container_resource_limits:sum + - expr: |- + kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} + ) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_cpu:kube_pod_container_resource_limits:sum +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.pod_owner.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-k8s.rules.pod-owner + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + 
release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: k8s.rules.pod_owner + rules: + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, + "replicaset", "$1", "owner_name", "(.*)" + ) * on (cluster, replicaset, namespace) group_left(owner_name) topk by (cluster, replicaset, namespace) ( + 1, max by (cluster, replicaset, namespace, owner_name) ( + kube_replicaset_owner{job="kube-state-metrics", owner_kind=""} + ) + ), + "workload", "$1", "replicaset", "(.*)" + ) + ) + labels: + workload_type: replicaset + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, + "replicaset", "$1", "owner_name", "(.*)" + ) * on (replicaset, namespace, cluster) group_left(owner_name) topk by (cluster, replicaset, namespace) ( + 1, max by (cluster, replicaset, namespace, owner_name) ( + kube_replicaset_owner{job="kube-state-metrics", owner_kind="Deployment"} + ) + ), + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: deployment + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: daemonset + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, + "workload", "$1", "owner_name", "(.*)") + ) + labels: + workload_type: statefulset + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + group by (cluster, namespace, workload, pod) ( + label_join( + group by (cluster, namespace, job_name, pod, 
owner_name) ( + label_join( + kube_pod_owner{job="kube-state-metrics", owner_kind="Job"} + , "job_name", "", "owner_name") + ) + * on (cluster, namespace, job_name) group_left() + group by (cluster, namespace, job_name) ( + kube_job_owner{job="kube-state-metrics", owner_kind=~"Pod|"} + ) + , "workload", "", "owner_name") + ) + labels: + workload_type: job + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="", owner_name=""}, + "workload", "$1", "pod", "(.+)") + ) + labels: + workload_type: barepod + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="Node"}, + "workload", "$1", "pod", "(.+)") + ) + labels: + workload_type: staticpod + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + group by (cluster, namespace, workload, workload_type, pod) ( + label_join( + label_join( + group by (cluster, namespace, job_name, pod) ( + label_join( + kube_pod_owner{job="kube-state-metrics", owner_kind="Job"} + , "job_name", "", "owner_name") + ) + * on (cluster, namespace, job_name) group_left(owner_kind, owner_name) + group by (cluster, namespace, job_name, owner_kind, owner_name) ( + kube_job_owner{job="kube-state-metrics", owner_kind!="Pod", owner_kind!=""} + ) + , "workload", "", "owner_name") + , "workload_type", "", "owner_kind") + + OR + + label_replace( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"} + , "replicaset", "$1", "owner_name", "(.+)" + ) + * on (cluster, namespace, replicaset) group_left(owner_kind, owner_name) + group by (cluster, namespace, replicaset, owner_kind, owner_name) ( + kube_replicaset_owner{job="kube-state-metrics", owner_kind!="Deployment", owner_kind!=""} + ) + , "workload", "$1", "owner_name", "(.+)") + OR + 
label_replace( + group by (cluster, namespace, pod, owner_name, owner_kind) ( + kube_pod_owner{job="kube-state-metrics", owner_kind!="ReplicaSet", owner_kind!="DaemonSet", owner_kind!="StatefulSet", owner_kind!="Job", owner_kind!="Node", owner_kind!=""} + ) + , "workload", "$1", "owner_name", "(.+)" + ) + , "workload_type", "$1", "owner_kind", "(.+)") + ) + record: namespace_workload_pod:kube_pod_owner:relabel +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-apiserver-availability.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - interval: 3m + name: kube-apiserver-availability.rules + rules: + - expr: avg_over_time(code_verb:apiserver_request_total:increase1h[30d]) * 24 * 30 + record: code_verb:apiserver_request_total:increase30d + - expr: sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"}) + labels: + verb: read + record: code:apiserver_request_total:increase30d + - expr: sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + labels: + verb: write + record: code:apiserver_request_total:increase30d + - expr: sum by (cluster, verb, scope, le) (increase(apiserver_request_sli_duration_seconds_bucket[1h])) + record: cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h + - expr: sum by (cluster, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h[30d]) * 24 * 30) + record: 
cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d + - expr: sum by (cluster, verb, scope) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h{le="+Inf"}) + record: cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase1h + - expr: sum by (cluster, verb, scope) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{le="+Inf"}) + record: cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d + - expr: |- + 1 - ( + ( + # write too slow + sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + - + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le=~"1(\\.0)?"} or vector(0)) + ) + + ( + # read too slow + sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"LIST|GET"}) + - + ( + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le=~"1(\\.0)?"} or vector(0)) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le=~"5(\\.0)?"} or vector(0)) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le=~"30(\\.0)?"} or vector(0)) + ) + ) + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d) + labels: + verb: all + record: apiserver_request:availability30d + - expr: |- + 1 - ( + sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"LIST|GET"}) + - + ( + # too slow + sum by (cluster) 
(cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le=~"1(\\.0)?"} or vector(0)) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le=~"5(\\.0)?"} or vector(0)) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le=~"30(\\.0)?"} or vector(0)) + ) + + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{verb="read",code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d{verb="read"}) + labels: + verb: read + record: apiserver_request:availability30d + - expr: |- + 1 - ( + ( + # too slow + sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + - + sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le=~"1(\\.0)?"} or vector(0)) + ) + + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{verb="write",code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d{verb="write"}) + labels: + verb: write + record: apiserver_request:availability30d + - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: code_resource:apiserver_request_total:rate5m + - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: code_resource:apiserver_request_total:rate5m + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (cluster, code, verb) 
(increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-burnrate.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-apiserver-burnrate.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kube-apiserver-burnrate.rules + rules: + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1d])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[1d])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[1d])) + + + sum by (cluster) 
(rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[1d])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d])) + labels: + verb: read + record: apiserver_request:burnrate1d + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[1h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[1h])) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[1h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h])) + labels: + verb: read + record: apiserver_request:burnrate1h + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[2h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[2h])) + or + vector(0) + ) + + + sum by (cluster) 
(rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[2h])) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[2h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h])) + labels: + verb: read + record: apiserver_request:burnrate2h + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[30m])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[30m])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[30m])) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[30m])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m])) + labels: + verb: read + record: apiserver_request:burnrate30m + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[3d])) + - + ( + ( + sum by (cluster) 
(rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[3d])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[3d])) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[3d])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d])) + labels: + verb: read + record: apiserver_request:burnrate3d + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[5m])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[5m])) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[5m])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: apiserver_request:burnrate5m + - expr: |- + ( + ( + # too 
slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[6h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le=~"1(\\.0)?"}[6h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le=~"5(\\.0)?"}[6h])) + + + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le=~"30(\\.0)?"}[6h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h])) + labels: + verb: read + record: apiserver_request:burnrate6h + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1d])) + - + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[1d])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + labels: + verb: write + record: apiserver_request:burnrate1d + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1h])) + - + sum by (cluster) 
(rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[1h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + labels: + verb: write + record: apiserver_request:burnrate1h + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[2h])) + - + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[2h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + labels: + verb: write + record: apiserver_request:burnrate2h + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[30m])) + - + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[30m])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + labels: + verb: write + record: apiserver_request:burnrate30m + - expr: |- + ( + ( + # too slow + sum by (cluster) 
(rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[3d])) + - + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[3d])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + labels: + verb: write + record: apiserver_request:burnrate3d + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m])) + - + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[5m])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: apiserver_request:burnrate5m + - expr: |- + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[6h])) + - + sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le=~"1(\\.0)?"}[6h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + labels: + verb: write + record: 
apiserver_request:burnrate6h +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-histogram.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-apiserver-histogram.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kube-apiserver-histogram.rules + rules: + - expr: histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0 + labels: + quantile: '0.99' + verb: read + record: cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0 + labels: + quantile: '0.99' + verb: write + record: cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-apiserver-slos + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: 
+ groups: + - name: kube-apiserver-slos + rules: + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn + summary: The API server is burning too much error budget. + expr: |- + sum by (cluster) (apiserver_request:burnrate1h) > (14.40 * 0.01000) + and on (cluster) + sum by (cluster) (apiserver_request:burnrate5m) > (14.40 * 0.01000) + for: 2m + labels: + long: 1h + severity: critical + short: 5m + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn + summary: The API server is burning too much error budget. + expr: |- + sum by (cluster) (apiserver_request:burnrate6h) > (6.00 * 0.01000) + and on (cluster) + sum by (cluster) (apiserver_request:burnrate30m) > (6.00 * 0.01000) + for: 15m + labels: + long: 6h + severity: critical + short: 30m + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn + summary: The API server is burning too much error budget. + expr: |- + sum by (cluster) (apiserver_request:burnrate1d) > (3.00 * 0.01000) + and on (cluster) + sum by (cluster) (apiserver_request:burnrate2h) > (3.00 * 0.01000) + for: 1h + labels: + long: 1d + severity: warning + short: 2h + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn + summary: The API server is burning too much error budget. 
+ expr: |- + sum by (cluster) (apiserver_request:burnrate3d) > (1.00 * 0.01000) + and on (cluster) + sum by (cluster) (apiserver_request:burnrate6h) > (1.00 * 0.01000) + for: 3h + labels: + long: 3d + severity: warning + short: 6h +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-prometheus-general.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kube-prometheus-general.rules + rules: + - expr: count without(instance, pod, node) (up == 1) + record: count:up1 + - expr: count without(instance, pod, node) (up == 0) + record: count:up0 +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-prometheus-node-recording.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kube-prometheus-node-recording.rules + rules: + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) BY (instance) + record: instance:node_cpu:rate:sum + - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) + record: 
instance:node_network_receive_bytes:rate:sum + - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) + record: instance:node_network_transmit_bytes:rate:sum + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) WITHOUT (cpu, mode) / ON (instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance) + record: instance:node_cpu:ratio + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) + record: cluster:node_cpu:sum_rate5m + - expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu)) + record: cluster:node_cpu:ratio +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-state-metrics.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kube-state-metrics + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kube-state-metrics + rules: + - alert: KubeStateMetricsListErrors + annotations: + description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricslisterrors + summary: kube-state-metrics is experiencing errors in list operations. 
+ expr: |- + (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) by (cluster) + / + sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m])) by (cluster)) + > 0.01 + for: 15m + labels: + severity: critical + - alert: KubeStateMetricsWatchErrors + annotations: + description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricswatcherrors + summary: kube-state-metrics is experiencing errors in watch operations. + expr: |- + (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) by (cluster) + / + sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m])) by (cluster)) + > 0.01 + for: 15m + labels: + severity: critical + - alert: KubeStateMetricsShardingMismatch + annotations: + description: kube-state-metrics pods are running with different --total-shards configuration, some Kubernetes objects may be exposed multiple times or not exposed at all. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardingmismatch + summary: kube-state-metrics sharding is misconfigured. + expr: stdvar (kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) != 0 + for: 15m + labels: + severity: critical + - alert: KubeStateMetricsShardsMissing + annotations: + description: kube-state-metrics shards are missing, some Kubernetes objects are not being exposed. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardsmissing + summary: kube-state-metrics shards are missing. 
+ expr: |- + 2^max(kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) - 1 + - + sum( 2 ^ max by (cluster, shard_ordinal) (kube_state_metrics_shard_ordinal{job="kube-state-metrics"}) ) by (cluster) + != 0 + for: 15m + labels: + severity: critical +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubelet.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubelet.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubelet.rules + rules: + - expr: |- + histogram_quantile( + 0.99, + sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) + * on (cluster, instance) group_left (node) + max by (cluster, instance, node) (kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + ) + labels: + quantile: '0.99' + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: |- + histogram_quantile( + 0.9, + sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) + * on (cluster, instance) group_left (node) + max by (cluster, instance, node) (kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + ) + labels: + quantile: '0.9' + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: |- + histogram_quantile( + 0.5, + sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) + * on (cluster, instance) group_left (node) + max by (cluster, instance, node) 
(kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + ) + labels: + quantile: '0.5' + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-apps.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubernetes-apps + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubernetes-apps + rules: + - alert: KubePodCrashLooping + annotations: + description: 'Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is in waiting state (reason: "CrashLoopBackOff") on cluster {{ $labels.cluster }}.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodcrashlooping + summary: Pod is crash looping. + expr: max_over_time(kube_pod_container_status_waiting_reason{reason="CrashLoopBackOff", job="kube-state-metrics", namespace=~".*"}[5m]) >= 1 + for: 15m + labels: + severity: warning + - alert: KubePodNotReady + annotations: + description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 15 minutes on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodnotready + summary: Pod has been in a non-ready state for more than 15 minutes. 
+ expr: |- + sum by (namespace, pod, cluster) ( + max by (namespace, pod, cluster) ( + kube_pod_status_phase{job="kube-state-metrics", namespace=~".*", phase=~"Pending|Unknown"} + ) * on (namespace, pod, cluster) group_left(owner_kind) topk by (namespace, pod, cluster) ( + 1, max by (namespace, pod, owner_kind, cluster) (kube_pod_owner{owner_kind!="Job"}) + ) + ) > 0 + for: 15m + labels: + severity: warning + - alert: KubeDeploymentGenerationMismatch + annotations: + description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentgenerationmismatch + summary: Deployment generation mismatch due to possible roll-back + expr: |- + kube_deployment_status_observed_generation{job="kube-state-metrics", namespace=~".*"} + != + kube_deployment_metadata_generation{job="kube-state-metrics", namespace=~".*"} + for: 15m + labels: + severity: warning + - alert: KubeDeploymentReplicasMismatch + annotations: + description: Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has not matched the expected number of replicas for longer than 15 minutes on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentreplicasmismatch + summary: Deployment has not matched the expected number of replicas. 
+ expr: |- + ( + kube_deployment_spec_replicas{job="kube-state-metrics", namespace=~".*"} + > + kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~".*"} + ) and ( + changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}[10m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeDeploymentRolloutStuck + annotations: + description: Rollout of deployment {{ $labels.namespace }}/{{ $labels.deployment }} is not progressing for longer than 15 minutes on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentrolloutstuck + summary: Deployment rollout is not progressing. + expr: |- + kube_deployment_status_condition{condition="Progressing", status="false",job="kube-state-metrics", namespace=~".*"} + != 0 + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetReplicasMismatch + annotations: + description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetreplicasmismatch + summary: StatefulSet has not matched the expected number of replicas. + expr: |- + ( + kube_statefulset_status_replicas_ready{job="kube-state-metrics", namespace=~".*"} + != + kube_statefulset_replicas{job="kube-state-metrics", namespace=~".*"} + ) and ( + changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}[10m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetGenerationMismatch + annotations: + description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back on cluster {{ $labels.cluster }}. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetgenerationmismatch + summary: StatefulSet generation mismatch due to possible roll-back + expr: |- + kube_statefulset_status_observed_generation{job="kube-state-metrics", namespace=~".*"} + != + kube_statefulset_metadata_generation{job="kube-state-metrics", namespace=~".*"} + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetUpdateNotRolledOut + annotations: + description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetupdatenotrolledout + summary: StatefulSet update has not been rolled out. + expr: |- + ( + max by (namespace, statefulset, job, cluster) ( + kube_statefulset_status_current_revision{job="kube-state-metrics", namespace=~".*"} + unless + kube_statefulset_status_update_revision{job="kube-state-metrics", namespace=~".*"} + ) + * on (namespace, statefulset, job, cluster) + ( + kube_statefulset_replicas{job="kube-state-metrics", namespace=~".*"} + != + kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~".*"} + ) + ) and on (namespace, statefulset, job, cluster) ( + changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeDaemonSetRolloutStuck + annotations: + description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15m on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck + summary: DaemonSet rollout is stuck. 
+ expr: |- + ( + ( + kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~".*"} + != + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"} + ) or ( + kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~".*"} + != + 0 + ) or ( + kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics", namespace=~".*"} + != + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"} + ) or ( + kube_daemonset_status_number_available{job="kube-state-metrics", namespace=~".*"} + != + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"} + ) + ) and ( + changes(kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics", namespace=~".*"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeContainerWaiting + annotations: + description: 'pod/{{ $labels.pod }} in namespace {{ $labels.namespace }} on container {{ $labels.container}} has been in waiting state for longer than 1 hour. (reason: "{{ $labels.reason }}") on cluster {{ $labels.cluster }}.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecontainerwaiting + summary: Pod container waiting longer than 1 hour + expr: kube_pod_container_status_waiting_reason{reason!="CrashLoopBackOff", job="kube-state-metrics", namespace=~".*"} > 0 + for: 1h + labels: + severity: warning + - alert: KubeDaemonSetNotScheduled + annotations: + description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled on cluster {{ $labels.cluster }}.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetnotscheduled + summary: DaemonSet pods are not scheduled. 
+ expr: |- + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"} + - + kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~".*"} > 0 + for: 10m + labels: + severity: warning + - alert: KubeDaemonSetMisScheduled + annotations: + description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run on cluster {{ $labels.cluster }}.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetmisscheduled + summary: DaemonSet pods are misscheduled. + expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~".*"} > 0 + for: 15m + labels: + severity: warning + - alert: KubeJobNotCompleted + annotations: + description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than {{ "43200" | humanizeDuration }} to complete on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubejobnotcompleted + summary: Job did not complete in time + expr: |- + time() - max by (namespace, job_name, cluster) (kube_job_status_start_time{job="kube-state-metrics", namespace=~".*"} + and + kube_job_status_active{job="kube-state-metrics", namespace=~".*"} > 0) > 43200 + labels: + severity: warning + - alert: KubeJobFailed + annotations: + description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. Removing failed job after investigation should clear this alert on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubejobfailed + summary: Job failed to complete. 
+ expr: kube_job_failed{job="kube-state-metrics", namespace=~".*"} > 0 + for: 15m + labels: + severity: warning + - alert: KubeHpaReplicasMismatch + annotations: + description: HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has not matched the desired number of replicas for longer than 15 minutes on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpareplicasmismatch + summary: HPA has not matched desired number of replicas. + expr: |- + (kube_horizontalpodautoscaler_status_desired_replicas{job="kube-state-metrics", namespace=~".*"} + != + kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"}) + and + (kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"} + > + kube_horizontalpodautoscaler_spec_min_replicas{job="kube-state-metrics", namespace=~".*"}) + and + (kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"} + < + kube_horizontalpodautoscaler_spec_max_replicas{job="kube-state-metrics", namespace=~".*"}) + and + changes(kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"}[15m]) == 0 + for: 15m + labels: + severity: warning + - alert: KubeHpaMaxedOut + annotations: + description: HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has been running at max replicas for longer than 15 minutes on cluster {{ $labels.cluster }}. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpamaxedout + summary: HPA is running at max replicas + expr: |- + kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"} + == + kube_horizontalpodautoscaler_spec_max_replicas{job="kube-state-metrics", namespace=~".*"} + for: 15m + labels: + severity: warning + - alert: KubePdbNotEnoughHealthyPods + annotations: + description: PDB {{ $labels.cluster }}/{{ $labels.namespace }}/{{ $labels.poddisruptionbudget }} expects {{ $value }} more healthy pods. The desired number of healthy pods has not been met for at least 15m. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepdbnotenoughhealthypods + summary: PDB does not have enough healthy pods. + expr: |- + ( + kube_poddisruptionbudget_status_desired_healthy{job="kube-state-metrics", namespace=~".*"} + - + kube_poddisruptionbudget_status_current_healthy{job="kube-state-metrics", namespace=~".*"} + ) + > 0 + for: 15m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-resources.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubernetes-resources + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubernetes-resources + rules: + - alert: KubeCPUOvercommit + annotations: + description: Cluster {{ $labels.cluster }} has overcommitted CPU resource requests for Pods by {{ printf "%.2f" $value }} CPU shares and cannot tolerate node failure. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit + summary: Cluster has overcommitted CPU resource requests. + expr: |- + # Non-HA clusters. + ( + ( + sum by (cluster) (namespace_cpu:kube_pod_container_resource_requests:sum{}) + - + sum by (cluster) (kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) > 0 + ) + and + count by (cluster) (max by (cluster, node) (kube_node_role{job="kube-state-metrics", role="control-plane"})) < 3 + ) + or + # HA clusters. + ( + sum by (cluster) (namespace_cpu:kube_pod_container_resource_requests:sum{}) + - + ( + # Skip clusters with only one allocatable node. + ( + sum by (cluster) (kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) + - + max by (cluster) (kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) + ) > 0 + ) > 0 + ) + for: 10m + labels: + severity: warning + - alert: KubeMemoryOvercommit + annotations: + description: Cluster {{ $labels.cluster }} has overcommitted memory resource requests for Pods by {{ $value | humanize }} bytes and cannot tolerate node failure. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit + summary: Cluster has overcommitted memory resource requests. + expr: |- + # Non-HA clusters. + ( + ( + sum by (cluster) (namespace_memory:kube_pod_container_resource_requests:sum{}) + - + sum by (cluster) (kube_node_status_allocatable{job="kube-state-metrics",resource="memory"}) > 0 + ) + and + count by (cluster) (max by (cluster, node) (kube_node_role{job="kube-state-metrics", role="control-plane"})) < 3 + ) + or + # HA clusters. + ( + sum by (cluster) (namespace_memory:kube_pod_container_resource_requests:sum{}) + - + ( + # Skip clusters with only one allocatable node. 
+ ( + sum by (cluster) (kube_node_status_allocatable{job="kube-state-metrics",resource="memory"}) + - + max by (cluster) (kube_node_status_allocatable{job="kube-state-metrics",resource="memory"}) + ) > 0 + ) > 0 + ) + for: 10m + labels: + severity: warning + - alert: KubeCPUQuotaOvercommit + annotations: + description: Cluster {{ $labels.cluster }} has overcommitted CPU resource requests for Namespaces. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuquotaovercommit + summary: Cluster has overcommitted CPU resource requests. + expr: |- + sum by (cluster) ( + min without(resource) (kube_resourcequota{job="kube-state-metrics", type="hard", resource=~"(cpu|requests.cpu)"}) + ) + / + sum by (cluster) ( + kube_node_status_allocatable{resource="cpu", job="kube-state-metrics"} + ) > 1.5 + for: 5m + labels: + severity: warning + - alert: KubeMemoryQuotaOvercommit + annotations: + description: Cluster {{ $labels.cluster }} has overcommitted memory resource requests for Namespaces. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryquotaovercommit + summary: Cluster has overcommitted memory resource requests. + expr: |- + sum by (cluster) ( + min without(resource) (kube_resourcequota{job="kube-state-metrics", type="hard", resource=~"(memory|requests.memory)"}) + ) + / + sum by (cluster) ( + kube_node_status_allocatable{resource="memory", job="kube-state-metrics"} + ) > 1.5 + for: 5m + labels: + severity: warning + - alert: KubeQuotaAlmostFull + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotaalmostfull + summary: Namespace quota is going to be full. 
+ expr: |- + max without (instance, job, type) ( + kube_resourcequota{job="kube-state-metrics", type="used"} + ) + / on (cluster, namespace, resource, resourcequota) group_left() + ( + max without (instance, job, type) ( + kube_resourcequota{job="kube-state-metrics", type="hard"} + ) > 0 + ) + > 0.9 < 1 + for: 15m + labels: + severity: info + - alert: KubeQuotaFullyUsed + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotafullyused + summary: Namespace quota is fully used. + expr: |- + max without (instance, job, type) ( + kube_resourcequota{job="kube-state-metrics", type="used"} + ) + / on (cluster, namespace, resource, resourcequota) group_left() + ( + max without (instance, job, type) ( + kube_resourcequota{job="kube-state-metrics", type="hard"} + ) > 0 + ) + == 1 + for: 15m + labels: + severity: info + - alert: KubeQuotaExceeded + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotaexceeded + summary: Namespace quota has exceeded the limits. + expr: |- + max without (instance, job, type) ( + kube_resourcequota{job="kube-state-metrics", type="used"} + ) + / on (cluster, namespace, resource, resourcequota) group_left() + ( + max without (instance, job, type) ( + kube_resourcequota{job="kube-state-metrics", type="hard"} + ) > 0 + ) > 1 + for: 15m + labels: + severity: warning + - alert: CPUThrottlingHigh + annotations: + description: '{{ $value | humanizePercentage }} throttling of CPU in namespace {{ $labels.namespace }} for container {{ $labels.container }} in pod {{ $labels.pod }} on cluster {{ $labels.cluster }}.' 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/cputhrottlinghigh + summary: Processes experience elevated CPU throttling. + expr: |- + sum without (id, metrics_path, name, image, endpoint, job, node) ( + topk by (cluster, namespace, pod, container, instance) (1, + increase( + container_cpu_cfs_throttled_periods_total{container!="", job="kubelet", metrics_path="/metrics/cadvisor", } + [5m]) + ) + ) + / on (cluster, namespace, pod, container, instance) group_left + sum without (id, metrics_path, name, image, endpoint, job, node) ( + topk by (cluster, namespace, pod, container, instance) (1, + increase( + container_cpu_cfs_periods_total{job="kubelet", metrics_path="/metrics/cadvisor", } + [5m]) + ) + ) + > ( 25 / 100 ) + for: 15m + labels: + severity: info +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-storage.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubernetes-storage + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubernetes-storage + rules: + - alert: KubePersistentVolumeFillingUp + annotations: + description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} is only {{ $value | humanizePercentage }} free. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup + summary: PersistentVolume is filling up. 
+ expr: |- + ( + kubelet_volume_stats_available_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} + / + kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} + ) < 0.03 + and + kubelet_volume_stats_used_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1 + for: 1m + labels: + severity: critical + - alert: KubePersistentVolumeFillingUp + annotations: + description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup + summary: PersistentVolume is filling up. 
+ expr: |- + ( + kubelet_volume_stats_available_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} + / + kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} + ) < 0.15 + and + kubelet_volume_stats_used_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0 + and + predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1 + for: 1h + labels: + severity: warning + - alert: KubePersistentVolumeInodesFillingUp + annotations: + description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} only has {{ $value | humanizePercentage }} free inodes. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup + summary: PersistentVolumeInodes are filling up. 
+ expr: |- + ( + kubelet_volume_stats_inodes_free{job="kubelet", namespace=~".*", metrics_path="/metrics"} + / + kubelet_volume_stats_inodes{job="kubelet", namespace=~".*", metrics_path="/metrics"} + ) < 0.03 + and + kubelet_volume_stats_inodes_used{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1 + for: 1m + labels: + severity: critical + - alert: KubePersistentVolumeInodesFillingUp + annotations: + description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} is expected to run out of inodes within four days. Currently {{ $value | humanizePercentage }} of its inodes are free. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup + summary: PersistentVolumeInodes are filling up. 
+ expr: |- + ( + kubelet_volume_stats_inodes_free{job="kubelet", namespace=~".*", metrics_path="/metrics"} + / + kubelet_volume_stats_inodes{job="kubelet", namespace=~".*", metrics_path="/metrics"} + ) < 0.15 + and + kubelet_volume_stats_inodes_used{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0 + and + predict_linear(kubelet_volume_stats_inodes_free{job="kubelet", namespace=~".*", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1 + unless on (cluster, namespace, persistentvolumeclaim) + kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1 + for: 1h + labels: + severity: warning + - alert: KubePersistentVolumeErrors + annotations: + description: The persistent volume {{ $labels.persistentvolume }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} has status {{ $labels.phase }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeerrors + summary: PersistentVolume is having issues with provisioning. 
+ expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0 + for: 5m + labels: + severity: critical +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubernetes-system-apiserver + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubernetes-system-apiserver + rules: + - alert: KubeClientCertificateExpiration + annotations: + description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration + summary: Client certificate is about to expire. + expr: |- + histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800 + and + on (job, cluster, instance) apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 + for: 5m + labels: + severity: warning + - alert: KubeClientCertificateExpiration + annotations: + description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration + summary: Client certificate is about to expire. 
+ expr: |- + histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400 + and + on (job, cluster, instance) apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 + for: 5m + labels: + severity: critical + - alert: KubeAggregatedAPIErrors + annotations: + description: Kubernetes aggregated API {{ $labels.instance }}/{{ $labels.name }} has reported {{ $labels.reason }} errors on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeaggregatedapierrors + summary: Kubernetes aggregated API has reported errors. + expr: sum by (cluster, instance, name, reason)(increase(aggregator_unavailable_apiservice_total{job="apiserver"}[1m])) > 0 + for: 10m + labels: + severity: warning + - alert: KubeAggregatedAPIDown + annotations: + description: Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been only {{ $value | humanize }}% available over the last 10m on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeaggregatedapidown + summary: Kubernetes aggregated API is down. + expr: (1 - max by (name, namespace, cluster)(avg_over_time(aggregator_unavailable_apiservice{job="apiserver"}[10m]))) * 100 < 85 + for: 5m + labels: + severity: warning + - alert: KubeAPIDown + annotations: + description: KubeAPI has disappeared from Prometheus target discovery. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapidown + summary: Target disappeared from Prometheus target discovery. + expr: absent(up{job="apiserver"}) + for: 15m + labels: + severity: critical + - alert: KubeAPITerminatedRequests + annotations: + description: The kubernetes apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests on cluster {{ $labels.cluster }}. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapiterminatedrequests + summary: The kubernetes apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. + expr: sum by (cluster) (rate(apiserver_request_terminations_total{job="apiserver"}[10m])) / ( sum by (cluster) (rate(apiserver_request_total{job="apiserver"}[10m])) + sum by (cluster) (rate(apiserver_request_terminations_total{job="apiserver"}[10m])) ) > 0.20 + for: 5m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubernetes-system-kubelet + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubernetes-system-kubelet + rules: + - alert: KubeNodeNotReady + annotations: + description: '{{ $labels.node }} has been unready for more than 15 minutes on cluster {{ $labels.cluster }}.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodenotready + summary: Node is not ready. + expr: |- + kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 + and on (cluster, node) + kube_node_spec_unschedulable{job="kube-state-metrics"} == 0 + for: 15m + labels: + severity: warning + - alert: KubeNodePressure + annotations: + description: '{{ $labels.node }} on cluster {{ $labels.cluster }} has active Condition {{ $labels.condition }}. This is caused by resource usage exceeding eviction thresholds.' 
+        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodepressure
+        summary: Node has an active Condition.
+      expr: |-
+        kube_node_status_condition{job="kube-state-metrics",condition=~"(MemoryPressure|DiskPressure|PIDPressure)",status="true"} == 1
+        and on (cluster, node)
+        kube_node_spec_unschedulable{job="kube-state-metrics"} == 0
+      for: 10m
+      labels:
+        severity: info
+    - alert: KubeNodeUnreachable
+      annotations:
+        description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled on cluster {{ $labels.cluster }}.'
+        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodeunreachable
+        summary: Node is unreachable.
+      expr: (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1
+      for: 15m
+      labels:
+        severity: warning
+    - alert: KubeletTooManyPods
+      annotations:
+        description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity on cluster {{ $labels.cluster }}.
+        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubelettoomanypods
+        summary: Kubelet is running at capacity.
+ expr: |- + ( + max by (cluster, instance) ( + kubelet_running_pods{job="kubelet", metrics_path="/metrics"} > 1 + ) + * on (cluster, instance) group_left(node) + max by (cluster, instance, node) ( + kubelet_node_name{job="kubelet", metrics_path="/metrics"} + ) + ) + / on (cluster, node) group_left() + max by (cluster, node) ( + kube_node_status_capacity{job="kube-state-metrics", resource="pods"} != 1 + ) > 0.95 + for: 15m + labels: + severity: info + - alert: KubeNodeReadinessFlapping + annotations: + description: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodereadinessflapping + summary: Node readiness status is flapping. + expr: |- + sum(changes(kube_node_status_condition{job="kube-state-metrics",status="true",condition="Ready"}[15m])) by (cluster, node) > 2 + and on (cluster, node) + kube_node_spec_unschedulable{job="kube-state-metrics"} == 0 + for: 15m + labels: + severity: warning + - alert: KubeNodeEviction + annotations: + description: Node {{ $labels.node }} on {{ $labels.cluster }} is evicting Pods due to {{ $labels.eviction_signal }}. Eviction occurs when eviction thresholds are crossed, typically caused by Pods exceeding RAM/ephemeral-storage limits. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodeeviction + summary: Node is evicting pods. 
+ expr: |- + sum(rate(kubelet_evictions{job="kubelet", metrics_path="/metrics"}[15m])) by (cluster, eviction_signal, instance) + * on (cluster, instance) group_left(node) + max by (cluster, instance, node) ( + kubelet_node_name{job="kubelet", metrics_path="/metrics"} + ) + > 0 + for: 0s + labels: + severity: info + - alert: KubeletPlegDurationHigh + annotations: + description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }} on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletplegdurationhigh + summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist. + expr: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10 + for: 5m + labels: + severity: warning + - alert: KubeletPodStartUpLatencyHigh + annotations: + description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }} on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletpodstartuplatencyhigh + summary: Kubelet Pod startup latency is too high. + expr: |- + histogram_quantile(0.99, + sum by (cluster, instance, le) ( + topk by (cluster, instance, le, operation_type) (1, + rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m]) + ) + ) + ) + * on (cluster, instance) group_left(node) + topk by (cluster, instance, node) (1, + kubelet_node_name{job="kubelet", metrics_path="/metrics"} + ) + > 60 + for: 15m + labels: + severity: warning + - alert: KubeletClientCertificateExpiration + annotations: + description: Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration + summary: Kubelet client certificate is about to expire. + expr: kubelet_certificate_manager_client_ttl_seconds < 604800 + labels: + severity: warning + - alert: KubeletClientCertificateExpiration + annotations: + description: Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration + summary: Kubelet client certificate is about to expire. + expr: kubelet_certificate_manager_client_ttl_seconds < 86400 + labels: + severity: critical + - alert: KubeletServerCertificateExpiration + annotations: + description: Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration + summary: Kubelet server certificate is about to expire. + expr: kubelet_certificate_manager_server_ttl_seconds < 604800 + labels: + severity: warning + - alert: KubeletServerCertificateExpiration + annotations: + description: Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }} on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration + summary: Kubelet server certificate is about to expire. + expr: kubelet_certificate_manager_server_ttl_seconds < 86400 + labels: + severity: critical + - alert: KubeletClientCertificateRenewalErrors + annotations: + description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes) on cluster {{ $labels.cluster }}. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificaterenewalerrors + summary: Kubelet has failed to renew its client certificate. + expr: increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: KubeletServerCertificateRenewalErrors + annotations: + description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes) on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificaterenewalerrors + summary: Kubelet has failed to renew its server certificate. + expr: increase(kubelet_server_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: KubeletDown + annotations: + description: Kubelet has disappeared from Prometheus target discovery. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletdown + summary: Target disappeared from Prometheus target discovery. 
+ expr: absent(up{job="kubelet", metrics_path="/metrics"}) + for: 15m + labels: + severity: critical +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-kubernetes-system + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: kubernetes-system + rules: + - alert: KubeVersionMismatch + annotations: + description: There are {{ $value }} different semantic versions of Kubernetes components running on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeversionmismatch + summary: Different semantic versions of Kubernetes components running. + expr: count by (cluster) (count by (git_version, cluster) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"git_version","$1","git_version","(v[0-9]*.[0-9]*).*"))) > 1 + for: 15m + labels: + severity: warning + - alert: KubeClientErrors + annotations: + description: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors on cluster {{ $labels.cluster }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclienterrors + summary: Kubernetes API server client is experiencing errors. 
+ expr: |- + (sum(rate(rest_client_requests_total{job="apiserver",code=~"5.."}[5m])) by (cluster, instance, job, namespace) + / + sum(rate(rest_client_requests_total{job="apiserver"}[5m])) by (cluster, instance, job, namespace)) + > 0.01 + for: 15m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-node-exporter.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: node-exporter.rules + rules: + - expr: |- + count without (cpu, mode) ( + node_cpu_seconds_total{job="node-exporter",mode="idle"} + ) + record: instance:node_num_cpu:sum + - expr: |- + 1 - avg without (cpu) ( + sum without (mode) (rate(node_cpu_seconds_total{job="node-exporter", mode=~"idle|iowait|steal"}[5m])) + ) + record: instance:node_cpu_utilisation:rate5m + - expr: |- + ( + node_load1{job="node-exporter"} + / + instance:node_num_cpu:sum{job="node-exporter"} + ) + record: instance:node_load1_per_cpu:ratio + - expr: |- + 1 - ( + ( + node_memory_MemAvailable_bytes{job="node-exporter"} + or + ( + node_memory_Buffers_bytes{job="node-exporter"} + + + node_memory_Cached_bytes{job="node-exporter"} + + + node_memory_MemFree_bytes{job="node-exporter"} + + + node_memory_Slab_bytes{job="node-exporter"} + ) + ) + / + node_memory_MemTotal_bytes{job="node-exporter"} + ) + record: instance:node_memory_utilisation:ratio + - expr: rate(node_vmstat_pgmajfault{job="node-exporter"}[5m]) + record: instance:node_vmstat_pgmajfault:rate5m + - expr: 
rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m]) + record: instance_device:node_disk_io_time_seconds:rate5m + - expr: rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m]) + record: instance_device:node_disk_io_time_weighted_seconds:rate5m + - expr: |- + sum without (device) ( + rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[5m]) + ) + record: instance:node_network_receive_bytes_excluding_lo:rate5m + - expr: |- + sum without (device) ( + rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[5m]) + ) + record: instance:node_network_transmit_bytes_excluding_lo:rate5m + - expr: |- + sum without (device) ( + rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[5m]) + ) + record: instance:node_network_receive_drop_excluding_lo:rate5m + - expr: |- + sum without (device) ( + rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[5m]) + ) + record: instance:node_network_transmit_drop_excluding_lo:rate5m +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-node-exporter + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: node-exporter + rules: + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf 
"%.2f" $value }}% available space left and is filling up. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup + summary: Filesystem is predicted to run out of space within the next 24 hours. + expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 15 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup + summary: Filesystem is predicted to run out of space within the next 4 hours. + expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 10 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace + summary: Filesystem has less than 5% space left. 
+ expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 30m + labels: + severity: warning + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace + summary: Filesystem has less than 3% space left. + expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 30m + labels: + severity: critical + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup + summary: Filesystem is predicted to run out of inodes within the next 24 hours. 
+ expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 40 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup + summary: Filesystem is predicted to run out of inodes within the next 4 hours. + expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 20 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles + summary: Filesystem has less than 5% inodes left. 
+ expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles + summary: Filesystem has less than 3% inodes left. + expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeNetworkReceiveErrs + annotations: + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodenetworkreceiveerrs + summary: Network interface is reporting many receive errors. + expr: rate(node_network_receive_errs_total{job="node-exporter"}[2m]) / rate(node_network_receive_packets_total{job="node-exporter"}[2m]) > 0.01 + for: 1h + labels: + severity: warning + - alert: NodeNetworkTransmitErrs + annotations: + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodenetworktransmiterrs + summary: Network interface is reporting many transmit errors. 
+ expr: rate(node_network_transmit_errs_total{job="node-exporter"}[2m]) / rate(node_network_transmit_packets_total{job="node-exporter"}[2m]) > 0.01 + for: 1h + labels: + severity: warning + - alert: NodeHighNumberConntrackEntriesUsed + annotations: + description: '{{ $labels.instance }} {{ $value | humanizePercentage }} of conntrack entries are used.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodehighnumberconntrackentriesused + summary: Number of conntrack are getting close to the limit. + expr: (node_nf_conntrack_entries{job="node-exporter"} / node_nf_conntrack_entries_limit) > 0.75 + labels: + severity: warning + - alert: NodeTextFileCollectorScrapeError + annotations: + description: Node Exporter text file collector on {{ $labels.instance }} failed to scrape. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodetextfilecollectorscrapeerror + summary: Node Exporter text file collector failed to scrape. + expr: node_textfile_scrape_error{job="node-exporter"} == 1 + labels: + severity: warning + - alert: NodeClockSkewDetected + annotations: + description: Clock at {{ $labels.instance }} is out of sync by more than 0.05s. Ensure NTP is configured correctly on this host. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodeclockskewdetected + summary: Clock skew detected. + expr: |- + ( + node_timex_offset_seconds{job="node-exporter"} > 0.05 + and + deriv(node_timex_offset_seconds{job="node-exporter"}[5m]) >= 0 + ) + or + ( + node_timex_offset_seconds{job="node-exporter"} < -0.05 + and + deriv(node_timex_offset_seconds{job="node-exporter"}[5m]) <= 0 + ) + for: 10m + labels: + severity: warning + - alert: NodeClockNotSynchronising + annotations: + description: Clock at {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodeclocknotsynchronising + summary: Clock not synchronising. 
+ expr: |- + min_over_time(node_timex_sync_status{job="node-exporter"}[5m]) == 0 + and + node_timex_maxerror_seconds{job="node-exporter"} >= 16 + for: 10m + labels: + severity: warning + - alert: NodeRAIDDegraded + annotations: + description: RAID array '{{ $labels.device }}' at {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddegraded + summary: RAID Array is degraded. + expr: node_md_disks_required{job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"} - ignoring (state) (node_md_disks{state="active",job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}) > 0 + for: 15m + labels: + severity: critical + - alert: NodeRAIDDiskFailure + annotations: + description: At least one device in RAID array at {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddiskfailure + summary: Failed device in RAID array. + expr: node_md_disks{state="failed",job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"} > 0 + labels: + severity: warning + - alert: NodeFileDescriptorLimit + annotations: + description: File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefiledescriptorlimit + summary: Kernel is predicted to exhaust file descriptors limit soon. 
+ expr: |- + ( + node_filefd_allocated{job="node-exporter"} * 100 / node_filefd_maximum{job="node-exporter"} > 70 + ) + for: 15m + labels: + severity: warning + - alert: NodeFileDescriptorLimit + annotations: + description: File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefiledescriptorlimit + summary: Kernel is predicted to exhaust file descriptors limit soon. + expr: |- + ( + node_filefd_allocated{job="node-exporter"} * 100 / node_filefd_maximum{job="node-exporter"} > 90 + ) + for: 15m + labels: + severity: critical + - alert: NodeCPUHighUsage + annotations: + description: 'CPU usage at {{ $labels.instance }} has been above 90% for the last 15 minutes, is currently at {{ printf "%.2f" $value }}%. + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodecpuhighusage + summary: High CPU usage. + expr: sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{job="node-exporter", mode!~"idle|iowait"}[2m]))) * 100 > 90 + for: 15m + labels: + severity: info + - alert: NodeSystemSaturation + annotations: + description: 'System load per core at {{ $labels.instance }} has been above 2 for the last 15 minutes, is currently at {{ printf "%.2f" $value }}. + + This might indicate this instance resources saturation and can cause it becoming unresponsive. + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodesystemsaturation + summary: System saturated, load per core is very high. + expr: |- + node_load1{job="node-exporter"} + / count without (cpu, mode) (node_cpu_seconds_total{job="node-exporter", mode="idle"}) > 2 + for: 15m + labels: + severity: warning + - alert: NodeMemoryMajorPagesFaults + annotations: + description: 'Memory major pages are occurring at very high rate at {{ $labels.instance }}, 500 major page faults per second for the last 15 minutes, is currently at {{ printf "%.2f" $value }}. 
+ + Please check that there is enough memory available at this instance. + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodememorymajorpagesfaults + summary: Memory major page faults are occurring at very high rate. + expr: rate(node_vmstat_pgmajfault{job="node-exporter"}[5m]) > 500 + for: 15m + labels: + severity: warning + - alert: NodeMemoryHighUtilization + annotations: + description: 'Memory is filling up at {{ $labels.instance }}, has been above 90% for the last 15 minutes, is currently at {{ printf "%.2f" $value }}%. + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodememoryhighutilization + summary: Host is running out of memory. + expr: 100 - (node_memory_MemAvailable_bytes{job="node-exporter"} / node_memory_MemTotal_bytes{job="node-exporter"} * 100) > 90 + for: 15m + labels: + severity: warning + - alert: NodeDiskIOSaturation + annotations: + description: 'Disk IO queue (aqu-sq) is high on {{ $labels.device }} at {{ $labels.instance }}, has been above 10 for the last 30 minutes, is currently at {{ printf "%.2f" $value }}. + + This symptom might indicate disk saturation. + + ' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodediskiosaturation + summary: Disk IO queue is high. + expr: rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m]) > 10 + for: 30m + labels: + severity: warning + - alert: NodeSystemdServiceFailed + annotations: + description: Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }} + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodesystemdservicefailed + summary: Systemd service has entered failed state. 
+      expr: node_systemd_unit_state{job="node-exporter", state="failed"} == 1
+      for: 5m
+      labels:
+        severity: warning
+    - alert: NodeSystemdServiceCrashlooping
+      annotations:
+        description: Systemd service {{ $labels.name }} has been restarted too many times at {{ $labels.instance }} for the last 15 minutes. Please check if service is crash looping.
+        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodesystemdservicecrashlooping
+        summary: Systemd service keeps restarting, possibly crash looping.
+      expr: increase(node_systemd_service_restart_total{job="node-exporter"}[5m]) > 2
+      for: 15m
+      labels:
+        severity: warning
+    - alert: NodeBondingDegraded
+      annotations:
+        description: Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in degraded state due to one or more slave failures.
+        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodebondingdegraded
+        summary: Bonding interface is degraded.
+      expr: (node_bonding_slaves{job="node-exporter"} - node_bonding_active{job="node-exporter"}) != 0
+      for: 5m
+      labels:
+        severity: warning
+---
+# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/node-network.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: kube-prometheus-stack-node-network
+  namespace: kube-prometheus-stack
+  labels:
+    app: kube-prometheus-stack
+
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/instance: kube-prometheus-stack
+    app.kubernetes.io/version: "79.7.1"
+    app.kubernetes.io/part-of: kube-prometheus-stack
+    chart: kube-prometheus-stack-79.7.1
+    release: "kube-prometheus-stack"
+    heritage: "Helm"
+spec:
+  groups:
+  - name: node-network
+    rules:
+    - alert: NodeNetworkInterfaceFlapping
+      annotations:
+        description: Network interface "{{ $labels.device }}" changing its up status often on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}
+        runbook_url: 
https://runbooks.prometheus-operator.dev/runbooks/general/nodenetworkinterfaceflapping + summary: Network interface is often changing its status + expr: changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2 + for: 2m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/node.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-node.rules + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: node.rules + rules: + - expr: |- + topk by (cluster, namespace, pod) (1, + max by (cluster, node, namespace, pod) ( + label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)") + )) + record: 'node_namespace_pod:kube_pod_info:' + - expr: |- + count by (cluster, node) ( + node_cpu_seconds_total{mode="idle",job="node-exporter"} + * on (cluster, namespace, pod) group_left(node) + topk by (cluster, namespace, pod) (1, node_namespace_pod:kube_pod_info:) + ) + record: node:node_num_cpu:sum + - expr: |- + sum( + node_memory_MemAvailable_bytes{job="node-exporter"} or + ( + node_memory_Buffers_bytes{job="node-exporter"} + + node_memory_Cached_bytes{job="node-exporter"} + + node_memory_MemFree_bytes{job="node-exporter"} + + node_memory_Slab_bytes{job="node-exporter"} + ) + ) by (cluster) + record: :node_memory_MemAvailable_bytes:sum + - expr: |- + avg by (cluster, node) ( + sum without (mode) ( + rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal",job="node-exporter"}[5m]) + ) + ) + record: node:node_cpu_utilization:ratio_rate5m + - expr: |- + avg by (cluster) ( + 
node:node_cpu_utilization:ratio_rate5m + ) + record: cluster:node_cpu:ratio_rate5m +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/prometheus-operator.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-prometheus-operator + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: prometheus-operator + rules: + - alert: PrometheusOperatorListErrors + annotations: + description: Errors while performing List operations in controller {{$labels.controller}} in {{$labels.namespace}} namespace. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorlisterrors + summary: Errors while performing list operations in controller. + expr: (sum by (cluster,controller,namespace) (rate(prometheus_operator_list_operations_failed_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[10m])) / sum by (cluster,controller,namespace) (rate(prometheus_operator_list_operations_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[10m]))) > 0.4 + for: 15m + labels: + severity: warning + - alert: PrometheusOperatorWatchErrors + annotations: + description: Errors while performing watch operations in controller {{$labels.controller}} in {{$labels.namespace}} namespace. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorwatcherrors + summary: Errors while performing watch operations in controller. 
+ expr: (sum by (cluster,controller,namespace) (rate(prometheus_operator_watch_operations_failed_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m])) / sum by (cluster,controller,namespace) (rate(prometheus_operator_watch_operations_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]))) > 0.4 + for: 15m + labels: + severity: warning + - alert: PrometheusOperatorSyncFailed + annotations: + description: Controller {{ $labels.controller }} in {{ $labels.namespace }} namespace fails to reconcile {{ $value }} objects. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorsyncfailed + summary: Last controller reconciliation failed + expr: min_over_time(prometheus_operator_syncs{status="failed",job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 10m + labels: + severity: warning + - alert: PrometheusOperatorReconcileErrors + annotations: + description: '{{ $value | humanizePercentage }} of reconciling operations failed for {{ $labels.controller }} controller in {{ $labels.namespace }} namespace.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorreconcileerrors + summary: Errors while reconciling objects. + expr: (sum by (cluster,controller,namespace) (rate(prometheus_operator_reconcile_errors_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]))) / (sum by (cluster,controller,namespace) (rate(prometheus_operator_reconcile_operations_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]))) > 0.1 + for: 10m + labels: + severity: warning + - alert: PrometheusOperatorStatusUpdateErrors + annotations: + description: '{{ $value | humanizePercentage }} of status update operations failed for {{ $labels.controller }} controller in {{ $labels.namespace }} namespace.' 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorstatusupdateerrors + summary: Errors while updating objects status. + expr: (sum by (cluster,controller,namespace) (rate(prometheus_operator_status_update_errors_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]))) / (sum by (cluster,controller,namespace) (rate(prometheus_operator_status_update_operations_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]))) > 0.1 + for: 10m + labels: + severity: warning + - alert: PrometheusOperatorNodeLookupErrors + annotations: + description: Errors while reconciling Prometheus in {{ $labels.namespace }} Namespace. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatornodelookuperrors + summary: Errors while reconciling Prometheus. + expr: rate(prometheus_operator_node_address_lookup_errors_total{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]) > 0.1 + for: 10m + labels: + severity: warning + - alert: PrometheusOperatorNotReady + annotations: + description: Prometheus operator in {{ $labels.namespace }} namespace isn't ready to reconcile {{ $labels.controller }} resources. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatornotready + summary: Prometheus operator not ready + expr: min by (cluster,controller,namespace) (max_over_time(prometheus_operator_ready{job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]) == 0) + for: 5m + labels: + severity: warning + - alert: PrometheusOperatorRejectedResources + annotations: + description: Prometheus operator in {{ $labels.namespace }} namespace rejected {{ printf "%0.0f" $value }} {{ $labels.controller }}/{{ $labels.resource }} resources. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorrejectedresources + summary: Resources rejected by Prometheus operator + expr: min_over_time(prometheus_operator_managed_resources{state="rejected",job="kube-prometheus-stack-operator",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 5m + labels: + severity: warning +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/prometheus.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-stack-prometheus + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + groups: + - name: prometheus + rules: + - alert: PrometheusBadConfig + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusbadconfig + summary: Failed Prometheus configuration reload. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(prometheus_config_last_reload_successful{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) == 0 + for: 10m + labels: + severity: critical + - alert: PrometheusSDRefreshFailure + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to refresh SD with mechanism {{$labels.mechanism}}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheussdrefreshfailure + summary: Failed Prometheus SD refresh. 
+ expr: increase(prometheus_sd_refresh_failures_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[10m]) > 0 + for: 20m + labels: + severity: warning + - alert: PrometheusKubernetesListWatchFailures + annotations: + description: Kubernetes service discovery of Prometheus {{$labels.namespace}}/{{$labels.pod}} is experiencing {{ printf "%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuskuberneteslistwatchfailures + summary: Requests in Kubernetes SD are failing. + expr: increase(prometheus_sd_kubernetes_failures_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusNotificationQueueRunningFull + annotations: + description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotificationqueuerunningfull + summary: Prometheus alert notification queue predicted to run full in less than 30m. + expr: |- + # Without min_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + predict_linear(prometheus_notifications_queue_length{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m], 60 * 30) + > + min_over_time(prometheus_notifications_queue_capacity{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + ) + for: 15m + labels: + severity: warning + - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers + annotations: + description: '{{ printf "%.1f" $value }}% of alerts sent by Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}} were affected by errors.' 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers + summary: More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors. + expr: |- + ( + rate(prometheus_notifications_errors_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + / + rate(prometheus_notifications_sent_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + ) + * 100 + > 1 + for: 15m + labels: + severity: warning + - alert: PrometheusNotConnectedToAlertmanagers + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotconnectedtoalertmanagers + summary: Prometheus is not connected to any Alertmanagers. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(prometheus_notifications_alertmanagers_discovered{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) < 1 + for: 10m + labels: + severity: warning + - alert: PrometheusTSDBReloadsFailing + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbreloadsfailing + summary: Prometheus has issues reloading blocks from disk. + expr: increase(prometheus_tsdb_reloads_failures_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[3h]) > 0 + for: 4h + labels: + severity: warning + - alert: PrometheusTSDBCompactionsFailing + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbcompactionsfailing + summary: Prometheus has issues compacting blocks. + expr: increase(prometheus_tsdb_compactions_failed_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[3h]) > 0 + for: 4h + labels: + severity: warning + - alert: PrometheusNotIngestingSamples + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotingestingsamples + summary: Prometheus is not ingesting samples. + expr: |- + ( + sum without(type) (rate(prometheus_tsdb_head_samples_appended_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m])) <= 0 + and + ( + sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}) > 0 + or + sum without(rule_group) (prometheus_rule_group_rules{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}) > 0 + ) + ) + for: 10m + labels: + severity: warning + - alert: PrometheusDuplicateTimestamps + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusduplicatetimestamps + summary: Prometheus is dropping samples with duplicate timestamps. + expr: rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 10m + labels: + severity: warning + - alert: PrometheusOutOfOrderTimestamps + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusoutofordertimestamps + summary: Prometheus drops samples with out-of-order timestamps. + expr: rate(prometheus_target_scrapes_sample_out_of_order_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 10m + labels: + severity: warning + - alert: PrometheusRemoteStorageFailures + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }} + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotestoragefailures + summary: Prometheus fails to send samples to remote storage. + expr: |- + ( + (rate(prometheus_remote_storage_failed_samples_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m])) + / + ( + (rate(prometheus_remote_storage_failed_samples_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m])) + + + (rate(prometheus_remote_storage_succeeded_samples_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) or rate(prometheus_remote_storage_samples_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m])) + ) + ) + * 100 + > 1 + for: 15m + labels: + severity: critical + - alert: PrometheusRemoteWriteBehind + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}. 
+ runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritebehind + summary: Prometheus remote write is behind. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + max_over_time(prometheus_remote_storage_queue_highest_timestamp_seconds{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + - + max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + ) + > 120 + for: 15m + labels: + severity: critical + - alert: PrometheusRemoteWriteDesiredShards + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}` $labels.instance | query | first | value }}. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritedesiredshards + summary: Prometheus remote write desired shards calculation wants to run more than configured max shards. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ ( + max_over_time(prometheus_remote_storage_shards_desired{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + > + max_over_time(prometheus_remote_storage_shards_max{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) + ) + for: 15m + labels: + severity: warning + - alert: PrometheusRuleFailures + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusrulefailures + summary: Prometheus is failing rule evaluations. + expr: increase(prometheus_rule_evaluation_failures_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: critical + - alert: PrometheusMissingRuleEvaluations + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusmissingruleevaluations + summary: Prometheus is missing rule evaluations due to slow rule group evaluation. + expr: increase(prometheus_rule_group_iterations_missed_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusTargetLimitHit + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because the number of targets exceeded the configured target_limit. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetlimithit + summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit. 
+ expr: increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusLabelLimitHit + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because some samples exceeded the configured label_limit, label_name_length_limit or label_value_length_limit. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuslabellimithit + summary: Prometheus has dropped targets because some scrape configs have exceeded the labels limit. + expr: increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusScrapeBodySizeLimitHit + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed {{ printf "%.0f" $value }} scrapes in the last 5m because some targets exceeded the configured body_size_limit. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapebodysizelimithit + summary: Prometheus has dropped some targets that exceeded body size limit. + expr: increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusScrapeSampleLimitHit + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed {{ printf "%.0f" $value }} scrapes in the last 5m because some targets exceeded the configured sample_limit. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapesamplelimithit + summary: Prometheus has failed scrapes that have exceeded the configured sample limit. 
+ expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusTargetSyncFailure + annotations: + description: '{{ printf "%.0f" $value }} targets in Prometheus {{$labels.namespace}}/{{$labels.pod}} have failed to sync because invalid configuration was supplied.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetsyncfailure + summary: Prometheus has failed to sync targets. + expr: increase(prometheus_target_sync_failed_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[30m]) > 0 + for: 5m + labels: + severity: critical + - alert: PrometheusHighQueryLoad + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} query API has less than 20% available capacity in its query engine for the last 15 minutes. + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload + summary: Prometheus is reaching its maximum capacity serving concurrent requests. + expr: avg_over_time(prometheus_engine_queries{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack"}[5m]) > 0.8 + for: 15m + labels: + severity: warning + - alert: PrometheusErrorSendingAlertsToAnyAlertmanager + annotations: + description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.' + runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstoanyalertmanager + summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager. 
+ expr: |- + min without (alertmanager) ( + rate(prometheus_notifications_errors_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack",alertmanager!~``}[5m]) + / + rate(prometheus_notifications_sent_total{job="kube-prometheus-stack-prometheus",namespace="kube-prometheus-stack",alertmanager!~``}[5m]) + ) + * 100 + > 3 + for: 15m + labels: + severity: critical +--- +# Source: kube-prometheus-stack/templates/redis-replication.yaml +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisReplication +metadata: + name: redis-replication-kube-prometheus-stack + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: redis-replication-kube-prometheus-stack + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + clusterSize: 3 + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:v8.0.3 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 50m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + storageClassName: ceph-block + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.48.0 +--- +# Source: kube-prometheus-stack/templates/scrape-config.yaml +apiVersion: monitoring.coreos.com/v1alpha1 +kind: ScrapeConfig +metadata: + name: external-nodes-http + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: external-nodes-http + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + staticConfigs: + - labels: + job: external-nodes + targets: + - ps08rp.alexlebens.net:9100 + - ps09rp.alexlebens.net:9100 + metricsPath: /metrics + scheme: HTTP +--- +# Source: kube-prometheus-stack/templates/scrape-config.yaml +apiVersion: monitoring.coreos.com/v1alpha1 +kind: ScrapeConfig +metadata: + name: external-nodes-https + namespace: kube-prometheus-stack + 
labels: + app.kubernetes.io/name: external-nodes-https + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + staticConfigs: + - labels: + job: external-nodes + targets: + - node-exporter-ps10rp.boreal-beaufort.ts.net + metricsPath: /metrics + scheme: HTTPS +--- +# Source: kube-prometheus-stack/templates/scrape-config.yaml +apiVersion: monitoring.coreos.com/v1alpha1 +kind: ScrapeConfig +metadata: + name: airgradient-http + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: airgradient-http + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + staticConfigs: + - labels: + job: airgradient + targets: + - it01ag.alexlebens.net:9926 + metricsPath: /metrics + scheme: HTTP +--- +# Source: kube-prometheus-stack/templates/scrape-config.yaml +apiVersion: monitoring.coreos.com/v1alpha1 +kind: ScrapeConfig +metadata: + name: garage-https + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: garage-https + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack +spec: + staticConfigs: + - labels: + job: garage + targets: + - garage-ps10rp.boreal-beaufort.ts.net:3903 + metricsPath: /metrics + scrapeInterval: 1m + scheme: HTTPS + authorization: + type: Bearer + credentials: + key: token + name: garage-metric-secret +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-kube-state-metrics + namespace: kube-prometheus-stack + labels: + helm.sh/chart: kube-state-metrics-6.4.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: kube-state-metrics + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: 
"2.17.0" + release: kube-prometheus-stack +spec: + jobLabel: app.kubernetes.io/name + selector: + matchLabels: + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: kube-prometheus-stack + endpoints: + - port: http + honorLabels: true +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-prometheus-node-exporter + namespace: kube-prometheus-stack + labels: + helm.sh/chart: prometheus-node-exporter-4.49.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + app.kubernetes.io/part-of: prometheus-node-exporter + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "1.10.2" + release: kube-prometheus-stack +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app.kubernetes.io/name: prometheus-node-exporter + app.kubernetes.io/instance: kube-prometheus-stack + attachMetadata: + node: false + endpoints: + - port: http-metrics + scheme: http +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-alertmanager + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-alertmanager + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + + selector: + matchLabels: + app: kube-prometheus-stack-alertmanager + release: "kube-prometheus-stack" + self-monitor: "true" + namespaceSelector: + matchNames: + - "kube-prometheus-stack" + endpoints: + - port: http-web + enableHttp2: true + path: 
"/metrics" + - port: reloader-web + path: "/metrics" +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-coredns + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-coredns + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: kube-prometheus-stack-coredns + release: "kube-prometheus-stack" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-apiserver + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-apiserver + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + port: https + scheme: https + metricRelabelings: + - action: drop + regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|40|45|50)(\.0)? 
+ sourceLabels: + - __name__ + - le + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + serverName: kubernetes + insecureSkipVerify: true + jobLabel: component + namespaceSelector: + matchNames: + - default + selector: + matchLabels: + component: apiserver + provider: kubernetes +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-kube-etcd + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-kube-etcd + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: kube-prometheus-stack-kube-etcd + release: "kube-prometheus-stack" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + metricRelabelings: + - action: labeldrop + regex: pod + relabelings: + - action: replace + regex: ^(.*)$ + replacement: $1 + separator: ; + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: nodename +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-kubelet + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-kubelet + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + + attachMetadata: 
+ node: false + jobLabel: k8s-app + namespaceSelector: + matchNames: + - kube-system + selector: + matchLabels: + app.kubernetes.io/name: kubelet + k8s-app: kubelet + endpoints: + - port: https-metrics + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + honorLabels: true + honorTimestamps: true + metricRelabelings: + - action: drop + regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0.25|2.5|15|25|120|600)(\.0)? + sourceLabels: + - __name__ + - le + relabelings: + - action: replace + sourceLabels: + - __metrics_path__ + targetLabel: metrics_path + - port: https-metrics + scheme: https + path: /metrics/cadvisor + interval: 10s + honorLabels: true + honorTimestamps: true + trackTimestampsStaleness: true + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + metricRelabelings: + - action: drop + regex: container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total) + sourceLabels: + - __name__ + - action: drop + regex: container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total) + sourceLabels: + - __name__ + - action: drop + regex: container_memory_(mapped_file|swap) + sourceLabels: + - __name__ + - action: drop + regex: container_(file_descriptors|tasks_state|threads_max) + sourceLabels: + - __name__ + - action: drop + regex: container_memory_failures_total;hierarchy + sourceLabels: + - __name__ + - scope + - action: drop + regex: container_network_.*;(cali|cilium|cni|lxc|nodelocaldns|tunl).* + sourceLabels: + - __name__ + - interface + - action: drop + regex: container_spec.* + sourceLabels: + - __name__ + - action: drop + regex: .+; + sourceLabels: + - id + - 
pod + relabelings: + - action: replace + sourceLabels: + - __metrics_path__ + targetLabel: metrics_path + - port: https-metrics + scheme: https + path: /metrics/probes + honorLabels: true + honorTimestamps: true + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + relabelings: + - action: replace + sourceLabels: + - __metrics_path__ + targetLabel: metrics_path +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-operator + namespace: kube-prometheus-stack + labels: + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app: kube-prometheus-stack-operator + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator +spec: + + endpoints: + - port: https + scheme: https + tlsConfig: + serverName: kube-prometheus-stack-operator + ca: + secret: + name: kube-prometheus-stack-admission + key: ca + optional: false + honorLabels: true + selector: + matchLabels: + app: kube-prometheus-stack-operator + release: "kube-prometheus-stack" + namespaceSelector: + matchNames: + - "kube-prometheus-stack" +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: kube-prometheus-stack-prometheus + namespace: kube-prometheus-stack + labels: + app: kube-prometheus-stack-prometheus + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" 
+ app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" +spec: + + selector: + matchLabels: + app: kube-prometheus-stack-prometheus + release: "kube-prometheus-stack" + self-monitor: "true" + namespaceSelector: + matchNames: + - "kube-prometheus-stack" + endpoints: + - port: http-web + path: "/metrics" + - port: reloader-web + path: "/metrics" +--- +# Source: kube-prometheus-stack/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: redis-replication-kube-prometheus-stack + namespace: kube-prometheus-stack + labels: + app.kubernetes.io/name: redis-replication-kube-prometheus-stack + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/part-of: kube-prometheus-stack + redis-operator: "true" + env: production +spec: + selector: + matchLabels: + redis_setup_type: replication + endpoints: + - port: redis-exporter + interval: 30s + scrapeTimeout: 10s +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: kube-prometheus-stack-admission + annotations: + + argocd.argoproj.io/hook: PreSync + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +webhooks: + - name: prometheusrulevalidate.monitoring.coreos.com + failurePolicy: Ignore + rules: + - apiGroups: + - monitoring.coreos.com + apiVersions: + - "*" + resources: + - 
prometheusrules + operations: + - CREATE + - UPDATE + clientConfig: + service: + namespace: kube-prometheus-stack + name: kube-prometheus-stack-operator + path: /admission-prometheusrules/validate + timeoutSeconds: 10 + admissionReviewVersions: ["v1", "v1beta1"] + sideEffects: None + - name: alertmanagerconfigsvalidate.monitoring.coreos.com + failurePolicy: Ignore + rules: + - apiGroups: + - monitoring.coreos.com + apiVersions: + - v1alpha1 + resources: + - alertmanagerconfigs + operations: + - CREATE + - UPDATE + clientConfig: + service: + namespace: kube-prometheus-stack + name: kube-prometheus-stack-operator + path: /admission-alertmanagerconfigs/validate + timeoutSeconds: 10 + admissionReviewVersions: ["v1", "v1beta1"] + sideEffects: None +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-prometheus-stack-admission + namespace: kube-prometheus-stack + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +automountServiceAccountToken: true +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-prometheus-stack-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + 
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-prometheus-stack-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-prometheus-stack-admission +subjects: + - kind: ServiceAccount + name: kube-prometheus-stack-admission + namespace: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: 
kube-prometheus-stack-admission + namespace: kube-prometheus-stack + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-prometheus-stack-admission + namespace: kube-prometheus-stack + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: kube-prometheus-stack-admission + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kube-prometheus-stack-admission +subjects: + - kind: ServiceAccount + name: kube-prometheus-stack-admission + namespace: kube-prometheus-stack +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml 
+apiVersion: batch/v1 +kind: Job +metadata: + name: kube-prometheus-stack-admission-create + namespace: kube-prometheus-stack + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + labels: + app: kube-prometheus-stack-admission-create + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +spec: + ttlSecondsAfterFinished: 60 + template: + metadata: + name: kube-prometheus-stack-admission-create + labels: + app: kube-prometheus-stack-admission-create + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook + spec: + containers: + - name: create + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.4 + imagePullPolicy: IfNotPresent + args: + - create + - --host=kube-prometheus-stack-operator,kube-prometheus-stack-operator.kube-prometheus-stack.svc + - --namespace=kube-prometheus-stack + - --secret-name=kube-prometheus-stack-admission + securityContext: + + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: + {} + restartPolicy: OnFailure + serviceAccountName: kube-prometheus-stack-admission + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + 
seccompProfile: + type: RuntimeDefault +--- +# Source: kube-prometheus-stack/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-prometheus-stack-admission-patch + namespace: kube-prometheus-stack + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + labels: + app: kube-prometheus-stack-admission-patch + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook +spec: + ttlSecondsAfterFinished: 60 + template: + metadata: + name: kube-prometheus-stack-admission-patch + labels: + app: kube-prometheus-stack-admission-patch + + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: kube-prometheus-stack + app.kubernetes.io/version: "79.7.1" + app.kubernetes.io/part-of: kube-prometheus-stack + chart: kube-prometheus-stack-79.7.1 + release: "kube-prometheus-stack" + heritage: "Helm" + app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator + app.kubernetes.io/component: prometheus-operator-webhook + spec: + containers: + - name: patch + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.4 + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=kube-prometheus-stack-admission + - --namespace=kube-prometheus-stack + - --secret-name=kube-prometheus-stack-admission + - --patch-failure-policy= + securityContext: + + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: + 
{} + restartPolicy: OnFailure + serviceAccountName: kube-prometheus-stack-admission + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + seccompProfile: + type: RuntimeDefault diff --git a/clusters/cl01tl/manifests/loki/loki.yaml b/clusters/cl01tl/manifests/loki/loki.yaml new file mode 100644 index 000000000..41b20d9e2 --- /dev/null +++ b/clusters/cl01tl/manifests/loki/loki.yaml @@ -0,0 +1,1535 @@ +--- +# Source: loki/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: loki + labels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/part-of: loki + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged +--- +# Source: loki/charts/loki/templates/loki-canary/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: loki-canary + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: canary +automountServiceAccountToken: true +--- +# Source: loki/charts/loki/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: loki + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" +automountServiceAccountToken: true +--- +# Source: loki/charts/promtail/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: loki-promtail + namespace: loki + labels: + helm.sh/chart: promtail-6.17.1 + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + app.kubernetes.io/managed-by: Helm +automountServiceAccountToken: true +--- +# Source: loki/charts/promtail/templates/secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: loki-promtail + namespace: loki 
+ labels: + helm.sh/chart: promtail-6.17.1 + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + app.kubernetes.io/managed-by: Helm +stringData: + promtail.yaml: | + server: + log_level: info + log_format: logfmt + http_listen_port: 3101 + + + clients: + - tenant_id: 1 + url: http://loki-gateway.loki.svc.cluster.local:80/loki/api/v1/push + + positions: + filename: /run/promtail/positions.yaml + + scrape_configs: + # See also https://github.com/grafana/loki/blob/master/production/ksonnet/promtail/scrape_config.libsonnet for reference + - job_name: kubernetes-pods + pipeline_stages: + - cri: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_controller_name + regex: ([0-9a-z-.]+?)(-[0-9a-f]{8,10})? + action: replace + target_label: __tmp_controller_name + - source_labels: + - __meta_kubernetes_pod_label_app_kubernetes_io_name + - __meta_kubernetes_pod_label_app + - __tmp_controller_name + - __meta_kubernetes_pod_name + regex: ^;*([^;]+)(;.*)?$ + action: replace + target_label: app + - source_labels: + - __meta_kubernetes_pod_label_app_kubernetes_io_instance + - __meta_kubernetes_pod_label_instance + regex: ^;*([^;]+)(;.*)?$ + action: replace + target_label: instance + - source_labels: + - __meta_kubernetes_pod_label_app_kubernetes_io_component + - __meta_kubernetes_pod_label_component + regex: ^;*([^;]+)(;.*)?$ + action: replace + target_label: component + - action: replace + source_labels: + - __meta_kubernetes_pod_node_name + target_label: node_name + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - action: replace + replacement: $1 + separator: / + source_labels: + - namespace + - app + target_label: job + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - action: 
replace + replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + - action: replace + regex: true/(.*) + replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash + - __meta_kubernetes_pod_annotation_kubernetes_io_config_hash + - __meta_kubernetes_pod_container_name + target_label: __path__ + + + + limits_config: + + + tracing: + enabled: false +--- +# Source: loki/charts/loki/templates/config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" +data: + config.yaml: | + + auth_enabled: false + bloom_build: + builder: + planner_address: "" + enabled: false + bloom_gateway: + client: + addresses: "" + enabled: false + chunk_store_config: + chunk_cache_config: + background: + writeback_buffer: 500000 + writeback_goroutines: 1 + writeback_size_limit: 500MB + default_validity: 0s + memcached: + batch_size: 4 + parallelism: 5 + memcached_client: + addresses: dnssrvnoa+_memcached-client._tcp.loki-chunks-cache.loki.svc.cluster.local + consistent_hash: true + max_idle_conns: 72 + timeout: 2000ms + common: + compactor_grpc_address: 'loki.loki.svc.cluster.local:9095' + path_prefix: /var/loki + replication_factor: 1 + storage: + filesystem: + chunks_directory: /var/loki/chunks + rules_directory: /var/loki/rules + compactor: + compaction_interval: 10m + delete_request_store: filesystem + retention_delete_delay: 2h + retention_delete_worker_count: 150 + retention_enabled: true + working_directory: /var/loki/compactor + frontend: + scheduler_address: "" + tail_proxy_url: "" + frontend_worker: + scheduler_address: "" + index_gateway: + mode: simple + ingester_client: + pool_config: + remote_timeout: 10s + remote_timeout: 10s 
+ limits_config: + allow_structured_metadata: false + ingestion_burst_size_mb: 1024 + ingestion_rate_mb: 1024 + max_cache_freshness_per_query: 10m + max_streams_per_user: 100000 + query_timeout: 300s + reject_old_samples: true + reject_old_samples_max_age: 168h + retention_period: 7d + split_queries_by_interval: 15m + volume_enabled: true + memberlist: + join_members: + - loki-memberlist.loki.svc.cluster.local + pattern_ingester: + enabled: false + query_range: + align_queries_with_step: true + cache_results: true + results_cache: + cache: + background: + writeback_buffer: 500000 + writeback_goroutines: 1 + writeback_size_limit: 500MB + default_validity: 12h + memcached_client: + addresses: dnssrvnoa+_memcached-client._tcp.loki-results-cache.loki.svc.cluster.local + consistent_hash: true + timeout: 500ms + update_interval: 1m + ruler: + storage: + type: local + wal: + dir: /var/loki/ruler-wal + runtime_config: + file: /etc/loki/runtime-config/runtime-config.yaml + schema_config: + configs: + - from: "2024-01-11" + index: + period: 24h + object_store: filesystem + schema: v13 + store: boltdb-shipper + server: + grpc_listen_port: 9095 + http_listen_port: 3100 + http_server_read_timeout: 600s + http_server_write_timeout: 600s + storage_config: + bloom_shipper: + working_directory: /var/loki/data/bloomshipper + boltdb_shipper: + index_gateway_client: + server_address: "" + hedging: + at: 250ms + max_per_second: 20 + up_to: 3 + tsdb_shipper: + index_gateway_client: + server_address: "" + use_thanos_objstore: false + tracing: + enabled: false +--- +# Source: loki/charts/loki/templates/gateway/configmap-gateway.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki-gateway + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: gateway +data: + nginx.conf: | + worker_processes 5; ## Default: 1 + error_log /dev/stderr; + pid 
/tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + http { + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + client_max_body_size 4M; + + proxy_read_timeout 600; ## 10 minutes + proxy_send_timeout 600; + proxy_connect_timeout 600; + + proxy_http_version 1.1; + + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /dev/stderr main; + + sendfile on; + tcp_nopush on; + resolver kube-dns.kube-system.svc.cluster.local.; + + # if the X-Query-Tags header is empty, set a noop= without a value as empty values are not logged + map $http_x_query_tags $query_tags { + "" "noop="; # When header is empty, set noop= + default $http_x_query_tags; # Otherwise, preserve the original value + } + + server { + listen 8080; + listen [::]:8080; + + location = / { + + return 200 'OK'; + auth_basic off; + } + + ######################################################## + # Configure backend targets + location ^~ /ui { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # Distributor + location = /api/prom/push { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /loki/api/v1/push { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /distributor/ring { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /otlp/v1/logs { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # Ingester + location = /flush { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location ^~ /ingester/ { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = 
/ingester { + + internal; # to suppress 301 + } + + # Ring + location = /ring { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # MemberListKV + location = /memberlist { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # Ruler + location = /ruler/ring { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /api/prom/rules { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location ^~ /api/prom/rules/ { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /loki/api/v1/rules { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location ^~ /loki/api/v1/rules/ { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /prometheus/api/v1/alerts { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /prometheus/api/v1/rules { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # Compactor + location = /compactor/ring { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /loki/api/v1/delete { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /loki/api/v1/cache/generation_numbers { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # IndexGateway + location = /indexgateway/ring { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # QueryScheduler + location = /scheduler/ring { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + # Config + location = /config { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + + + # QueryFrontend, Querier + location = /api/prom/tail { + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /loki/api/v1/tail { + 
proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location ^~ /api/prom/ { + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /api/prom { + + internal; # to suppress 301 + } + location ^~ /loki/api/v1/ { + # pass custom headers set by Grafana as X-Query-Tags which are logged as key/value pairs in metrics.go log messages + proxy_set_header X-Query-Tags "${query_tags},user=${http_x_grafana_user},dashboard_id=${http_x_dashboard_uid},dashboard_title=${http_x_dashboard_title},panel_id=${http_x_panel_id},panel_title=${http_x_panel_title},source_rule_uid=${http_x_rule_uid},rule_name=${http_x_rule_name},rule_folder=${http_x_rule_folder},rule_version=${http_x_rule_version},rule_source=${http_x_rule_source},rule_type=${http_x_rule_type}"; + + proxy_pass http://loki.loki.svc.cluster.local:3100$request_uri; + } + location = /loki/api/v1 { + + internal; # to suppress 301 + } + } + } +--- +# Source: loki/charts/loki/templates/runtime-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki-runtime + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" +data: + runtime-config.yaml: | + {} +--- +# Source: loki/charts/loki/templates/backend/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + name: loki-clusterrole +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: loki/charts/promtail/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: loki-promtail + labels: + helm.sh/chart: promtail-6.17.1 + 
app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: + - get + - watch + - list +--- +# Source: loki/charts/loki/templates/backend/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: loki-clusterrolebinding + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" +subjects: + - kind: ServiceAccount + name: loki + namespace: loki +roleRef: + kind: ClusterRole + name: loki-clusterrole + apiGroup: rbac.authorization.k8s.io +--- +# Source: loki/charts/promtail/templates/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: loki-promtail + labels: + helm.sh/chart: promtail-6.17.1 + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + app.kubernetes.io/managed-by: Helm +subjects: + - kind: ServiceAccount + name: loki-promtail + namespace: loki +roleRef: + kind: ClusterRole + name: loki-promtail + apiGroup: rbac.authorization.k8s.io +--- +# Source: loki/charts/loki/templates/chunks-cache/service-chunks-cache-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki-chunks-cache + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: "memcached-chunks-cache" + annotations: + {} + namespace: "loki" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: client + - name: http-metrics + port: 9150 + targetPort: http-metrics + + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: 
"memcached-chunks-cache" +--- +# Source: loki/charts/loki/templates/gateway/service-gateway.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki-gateway + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: gateway + prometheus.io/service-monitor: "false" + annotations: +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 80 + targetPort: http-metrics + protocol: TCP + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: gateway +--- +# Source: loki/charts/loki/templates/loki-canary/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki-canary + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: canary + annotations: +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3500 + targetPort: http-metrics + protocol: TCP + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: canary +--- +# Source: loki/charts/loki/templates/results-cache/service-results-cache-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki-results-cache + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: "memcached-results-cache" + annotations: + {} + namespace: "loki" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: client + - name: http-metrics + port: 9150 + targetPort: http-metrics + + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: "memcached-results-cache" +--- +# Source: loki/charts/loki/templates/service-memberlist.yaml 
+apiVersion: v1 +kind: Service +metadata: + name: loki-memberlist + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + annotations: +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp + port: 7946 + targetPort: http-memberlist + protocol: TCP + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/part-of: memberlist +--- +# Source: loki/charts/loki/templates/single-binary/service-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki-headless + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + variant: headless + prometheus.io/service-monitor: "false" + annotations: +spec: + clusterIP: None + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki +--- +# Source: loki/charts/loki/templates/single-binary/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + annotations: +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3100 + targetPort: http-metrics + protocol: TCP + - name: grpc + port: 9095 + targetPort: grpc + protocol: TCP + selector: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: single-binary +--- +# Source: loki/charts/promtail/templates/service-metrics.yaml +apiVersion: v1 +kind: Service +metadata: + name: loki-promtail-metrics + namespace: loki + labels: + helm.sh/chart: promtail-6.17.1 + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + app.kubernetes.io/managed-by: Helm + 
promtail: 3.0.0 +spec: + clusterIP: None + ports: + - name: http-metrics + port: 3101 + targetPort: http-metrics + protocol: TCP + selector: + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki +--- +# Source: loki/charts/loki/templates/loki-canary/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loki-canary + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: canary +spec: + selector: + matchLabels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: canary + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: canary + spec: + serviceAccountName: loki-canary + + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + containers: + - name: loki-canary + image: docker.io/grafana/loki-canary:3.5.7 + imagePullPolicy: IfNotPresent + args: + - -addr=loki-gateway.loki.svc.cluster.local.:80 + - -labelname=pod + - -labelvalue=$(POD_NAME) + - -push=true + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + ports: + - name: http-metrics + containerPort: 3500 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + + readinessProbe: + httpGet: + path: /metrics + port: http-metrics + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumes: +--- +# Source: loki/charts/promtail/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loki-promtail + namespace: loki + labels: + helm.sh/chart: promtail-6.17.1 + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + 
app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + updateStrategy: + {} + template: + metadata: + labels: + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + annotations: + checksum/config: c63810d2a03283062a5987b913985abc93a7e5cb90fde608a9f3ef77cb4e3412 + spec: + serviceAccountName: loki-promtail + automountServiceAccountToken: true + enableServiceLinks: true + securityContext: + runAsGroup: 0 + runAsUser: 0 + containers: + - name: promtail + image: "docker.io/grafana/promtail:3.5.1" + imagePullPolicy: IfNotPresent + args: + - "-config.file=/etc/promtail/promtail.yaml" + volumeMounts: + - name: config + mountPath: /etc/promtail + - mountPath: /run/promtail + name: run + - mountPath: /var/lib/docker/containers + name: containers + readOnly: true + - mountPath: /var/log/pods + name: pods + readOnly: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - name: http-metrics + containerPort: 3101 + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + readinessProbe: + failureThreshold: 5 + httpGet: + path: '/ready' + port: http-metrics + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: config + secret: + secretName: loki-promtail + - hostPath: + path: /run/promtail + name: run + - hostPath: + path: /var/lib/docker/containers + name: containers + - hostPath: + path: /var/log/pods + name: pods +--- +# Source: loki/charts/loki/templates/gateway/deployment-gateway-nginx.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: loki-gateway + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + 
app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: gateway +spec: + replicas: 1 + strategy: + type: RollingUpdate + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: gateway + template: + metadata: + annotations: + checksum/config: d76bd0b627b1549dddc6ce5304d9322ebdeb13e5b813234d8067357925630015 + labels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: gateway + spec: + serviceAccountName: loki + enableServiceLinks: true + + securityContext: + fsGroup: 101 + runAsGroup: 101 + runAsNonRoot: true + runAsUser: 101 + terminationGracePeriodSeconds: 30 + containers: + - name: nginx + image: docker.io/nginxinc/nginx-unprivileged:1.29-alpine + imagePullPolicy: IfNotPresent + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: 15 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: config + mountPath: /etc/nginx + - name: tmp + mountPath: /tmp + - name: docker-entrypoint-d-override + mountPath: /docker-entrypoint.d + resources: + {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: gateway + app.kubernetes.io/instance: 'loki' + app.kubernetes.io/name: 'loki' + topologyKey: kubernetes.io/hostname + volumes: + - name: config + configMap: + name: loki-gateway + - name: tmp + emptyDir: {} + - name: docker-entrypoint-d-override + emptyDir: {} +--- +# Source: loki/charts/loki/templates/chunks-cache/statefulset-chunks-cache.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: loki-chunks-cache + labels: + helm.sh/chart: loki-6.46.0 + 
app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: "memcached-chunks-cache" + name: "memcached-chunks-cache" + annotations: + {} + namespace: "loki" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: "memcached-chunks-cache" + name: "memcached-chunks-cache" + updateStrategy: + type: RollingUpdate + serviceName: loki-chunks-cache + template: + metadata: + labels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: "memcached-chunks-cache" + name: "memcached-chunks-cache" + annotations: + spec: + serviceAccountName: loki + securityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + [] + tolerations: + [] + terminationGracePeriodSeconds: 60 + containers: + - name: memcached + image: memcached:1.6.39-alpine + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 9830Mi + requests: + cpu: 500m + memory: 9830Mi + ports: + - containerPort: 11211 + name: client + args: + - -m 8192 + - --extended=modern,track_sizes + - -I 5m + - -c 16384 + - -v + - -u 11211 + env: + envFrom: + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 5 + tcpSocket: + port: client + timeoutSeconds: 3 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + tcpSocket: + port: client + timeoutSeconds: 5 + - name: exporter + image: prom/memcached-exporter:v0.15.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:11211" + - "--web.listen-address=0.0.0.0:9150" + resources: + 
limits: {} + requests: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + readinessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: http-metrics + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: http-metrics + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 +--- +# Source: loki/charts/loki/templates/results-cache/statefulset-results-cache.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: loki-results-cache + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: "memcached-results-cache" + name: "memcached-results-cache" + annotations: + {} + namespace: "loki" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: "memcached-results-cache" + name: "memcached-results-cache" + updateStrategy: + type: RollingUpdate + serviceName: loki-results-cache + template: + metadata: + labels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: "memcached-results-cache" + name: "memcached-results-cache" + annotations: + spec: + serviceAccountName: loki + securityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + [] + tolerations: + [] + terminationGracePeriodSeconds: 60 + containers: + - name: memcached + image: memcached:1.6.39-alpine + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 1229Mi + requests: + cpu: 500m + memory: 1229Mi + ports: + - containerPort: 11211 + name: client + args: + - -m 1024 + - --extended=modern,track_sizes + - -I 5m + - -c 16384 + - -v 
+ - -u 11211 + env: + envFrom: + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 5 + tcpSocket: + port: client + timeoutSeconds: 3 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + tcpSocket: + port: client + timeoutSeconds: 5 + - name: exporter + image: prom/memcached-exporter:v0.15.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:11211" + - "--web.listen-address=0.0.0.0:9150" + resources: + limits: {} + requests: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + readinessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: http-metrics + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /metrics + port: http-metrics + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 +--- +# Source: loki/charts/loki/templates/single-binary/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: loki + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: single-binary + app.kubernetes.io/part-of: memberlist +spec: + replicas: 1 + podManagementPolicy: Parallel + updateStrategy: + rollingUpdate: + partition: 0 + serviceName: loki-headless + revisionHistoryLimit: 10 + + persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Delete + selector: + matchLabels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: single-binary + template: + metadata: + annotations: + checksum/config: 9cded33d7ba292eb76711b451f5ecd9bade13c7fb5ffb5622229f5706f8f90dd 
+ storage/size: "150Gi" + kubectl.kubernetes.io/default-container: "loki" + labels: + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/component: single-binary + app.kubernetes.io/part-of: memberlist + spec: + serviceAccountName: loki + automountServiceAccountToken: true + enableServiceLinks: true + + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + terminationGracePeriodSeconds: 30 + containers: + - name: loki + image: docker.io/grafana/loki:3.5.7 + imagePullPolicy: IfNotPresent + args: + - -config.file=/etc/loki/config/config.yaml + - -target=all + ports: + - name: http-metrics + containerPort: 3100 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: http-memberlist + containerPort: 7946 + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: config + mountPath: /etc/loki/config + - name: runtime-config + mountPath: /etc/loki/runtime-config + - name: storage + mountPath: /var/loki + - name: sc-rules-volume + mountPath: "/rules" + resources: + {} + - name: loki-sc-rules + image: docker.io/kiwigrid/k8s-sidecar:1.30.10 + imagePullPolicy: IfNotPresent + env: + - name: METHOD + value: WATCH + - name: LABEL + value: "loki_rule" + - name: FOLDER + value: "/rules" + - name: RESOURCE + value: "both" + - name: WATCH_SERVER_TIMEOUT + value: "60" + - name: WATCH_CLIENT_TIMEOUT + value: "60" + - name: LOG_LEVEL + value: "INFO" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: sc-rules-volume + mountPath: "/rules" + affinity: + podAntiAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: single-binary + app.kubernetes.io/instance: 'loki' + app.kubernetes.io/name: 'loki' + topologyKey: kubernetes.io/hostname + volumes: + - name: tmp + emptyDir: {} + - name: config + configMap: + name: loki + items: + - key: "config.yaml" + path: "config.yaml" + - name: runtime-config + configMap: + name: loki-runtime + - name: sc-rules-volume + emptyDir: {} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + storageClassName: synology-iscsi-delete + resources: + requests: + storage: "150Gi" +--- +# Source: loki/charts/promtail/templates/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: loki-promtail + labels: + helm.sh/chart: promtail-6.17.1 + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.1" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: promtail + app.kubernetes.io/instance: loki + endpoints: + - port: http-metrics + scheme: http +--- +# Source: loki/charts/loki/templates/tests/test-canary.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "loki-helm-test" + namespace: loki + labels: + helm.sh/chart: loki-6.46.0 + app.kubernetes.io/name: loki + app.kubernetes.io/instance: loki + app.kubernetes.io/version: "3.5.7" + app.kubernetes.io/component: helm-test + annotations: + "helm.sh/hook": test +spec: + containers: + - name: loki-helm-test + image: docker.io/grafana/loki-helm-test:latest + env: + - name: CANARY_SERVICE_ADDRESS + value: "http://loki-canary.loki.svc.cluster.local:3500/metrics" + - name: CANARY_PROMETHEUS_ADDRESS + value: "" + - name: CANARY_TEST_TIMEOUT + value: "1m" + args: + - -test.v + restartPolicy: Never diff --git a/clusters/cl01tl/manifests/s3-exporter/s3-exporter.yaml 
b/clusters/cl01tl/manifests/s3-exporter/s3-exporter.yaml new file mode 100644 index 000000000..c3e86fb39 --- /dev/null +++ b/clusters/cl01tl/manifests/s3-exporter/s3-exporter.yaml @@ -0,0 +1,559 @@ +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: s3-exporter-digital-ocean + labels: + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/service: s3-exporter-digital-ocean + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + type: ClusterIP + ports: + - port: 9655 + targetPort: 9655 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: digital-ocean + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: s3-exporter-garage-local + labels: + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/service: s3-exporter-garage-local + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + type: ClusterIP + ports: + - port: 9655 + targetPort: 9655 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: garage-local + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: s3-exporter-garage-remote + labels: + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/service: s3-exporter-garage-remote + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + type: ClusterIP + ports: + - port: 9655 + targetPort: 9655 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: garage-remote + 
app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: s3-exporter-ceph-directus + labels: + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/service: s3-exporter-ceph-directus + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + type: ClusterIP + ports: + - port: 9655 + targetPort: 9655 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: ceph-directus + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: s3-exporter-ceph-directus + labels: + app.kubernetes.io/controller: ceph-directus + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: ceph-directus + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + template: + metadata: + labels: + app.kubernetes.io/controller: ceph-directus + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: S3_NAME + value: ceph-directus + - name: S3_ENDPOINT + valueFrom: + secretKeyRef: + key: BUCKET_HOST + name: s3-ceph-directus-secret + - name: S3_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: s3-ceph-directus-secret + - name: S3_SECRET_KEY + valueFrom: + secretKeyRef: + 
key: AWS_SECRET_ACCESS_KEY + name: s3-ceph-directus-secret + - name: S3_REGION + value: us-east-1 + - name: LOG_LEVEL + value: info + - name: S3_FORCE_PATH_STYLE + value: "true" + image: molu8bits/s3bucket_exporter:1.0.2 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 64Mi +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: s3-exporter-digital-ocean + labels: + app.kubernetes.io/controller: digital-ocean + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: digital-ocean + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + template: + metadata: + labels: + app.kubernetes.io/controller: digital-ocean + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: S3_NAME + value: digital-ocean + - name: S3_ENDPOINT + value: https://nyc3.digitaloceanspaces.com + - name: S3_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: s3-do-home-infra-secret + - name: S3_SECRET_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: s3-do-home-infra-secret + - name: S3_REGION + valueFrom: + secretKeyRef: + key: AWS_REGION + name: s3-do-home-infra-secret + - name: LOG_LEVEL + value: info + - name: S3_FORCE_PATH_STYLE + value: "false" + image: molu8bits/s3bucket_exporter:1.0.2 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 64Mi +--- +# Source: 
s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: s3-exporter-garage-local + labels: + app.kubernetes.io/controller: garage-local + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: garage-local + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + template: + metadata: + labels: + app.kubernetes.io/controller: garage-local + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: S3_NAME + value: garage-local + - name: S3_ENDPOINT + value: http://garage-main.garage:3900 + - name: S3_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: s3-garage-secret + - name: S3_SECRET_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: s3-garage-secret + - name: S3_REGION + value: us-east-1 + - name: LOG_LEVEL + value: debug + - name: S3_FORCE_PATH_STYLE + value: "true" + image: molu8bits/s3bucket_exporter:1.0.2 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 64Mi +--- +# Source: s3-exporter/charts/s3-exporter/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: s3-exporter-garage-remote + labels: + app.kubernetes.io/controller: garage-remote + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: s3-exporter + helm.sh/chart: s3-exporter-4.4.0 + namespace: s3-exporter +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + 
selector: + matchLabels: + app.kubernetes.io/controller: garage-remote + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + template: + metadata: + labels: + app.kubernetes.io/controller: garage-remote + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/name: s3-exporter + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: S3_NAME + value: garage-remote + - name: S3_ENDPOINT + value: https://garage-ps10rp.boreal-beaufort.ts.net:3900 + - name: S3_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: s3-garage-secret + - name: S3_SECRET_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: s3-garage-secret + - name: S3_REGION + value: us-east-1 + - name: LOG_LEVEL + value: debug + - name: S3_FORCE_PATH_STYLE + value: "true" + image: molu8bits/s3bucket_exporter:1.0.2 + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 64Mi +--- +# Source: s3-exporter/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: s3-do-home-infra-secret + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-do-home-infra-secret + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/all-access + metadataPolicy: None + property: AWS_ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /digital-ocean/home-infra/all-access + metadataPolicy: None + property: AWS_SECRET_ACCESS_KEY + - secretKey: AWS_REGION + remoteRef: + conversionStrategy: Default + 
decodingStrategy: None + key: /digital-ocean/home-infra/prometheus-exporter + metadataPolicy: None + property: AWS_REGION +--- +# Source: s3-exporter/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: s3-ceph-directus-secret + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-ceph-directus-secret + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/ceph + metadataPolicy: None + property: AWS_ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/ceph + metadataPolicy: None + property: AWS_SECRET_ACCESS_KEY + - secretKey: BUCKET_HOST + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /cl01tl/directus/ceph + metadataPolicy: None + property: BUCKET_HOST +--- +# Source: s3-exporter/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: s3-garage-secret + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-garage-secret + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: AWS_ACCESS_KEY_ID + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/s3-exporter + metadataPolicy: None + property: ACCESS_KEY_ID + - secretKey: AWS_SECRET_ACCESS_KEY + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /garage/home-infra/s3-exporter + metadataPolicy: None + property: ACCESS_SECRET_KEY +--- +# Source: s3-exporter/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: 
s3-exporter-digital-ocean + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-exporter-digital-ocean + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + selector: + matchLabels: + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/service: s3-exporter-digital-ocean + endpoints: + - port: metrics + interval: 5m + scrapeTimeout: 120s + path: /metrics +--- +# Source: s3-exporter/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: s3-exporter-ceph-directus + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-exporter-ceph-directus + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + selector: + matchLabels: + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/service: s3-exporter-ceph-directus + endpoints: + - port: metrics + interval: 5m + scrapeTimeout: 120s + path: /metrics +--- +# Source: s3-exporter/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: s3-exporter-garage-local + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-exporter-garage-local + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + selector: + matchLabels: + app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/service: s3-exporter-garage-local + endpoints: + - port: metrics + interval: 5m + scrapeTimeout: 120s + path: /metrics +--- +# Source: s3-exporter/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: s3-exporter-garage-remote + namespace: s3-exporter + labels: + app.kubernetes.io/name: s3-exporter-garage-remote + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/part-of: s3-exporter +spec: + selector: + matchLabels: + 
app.kubernetes.io/name: s3-exporter + app.kubernetes.io/instance: s3-exporter + app.kubernetes.io/service: s3-exporter-garage-remote + endpoints: + - port: metrics + interval: 5m + scrapeTimeout: 120s + path: /metrics diff --git a/clusters/cl01tl/manifests/shelly-plug/shelly-plug.yaml b/clusters/cl01tl/manifests/shelly-plug/shelly-plug.yaml new file mode 100644 index 000000000..e22c7be1b --- /dev/null +++ b/clusters/cl01tl/manifests/shelly-plug/shelly-plug.yaml @@ -0,0 +1,185 @@ +--- +# Source: shelly-plug/charts/shelly-plug/templates/common.yaml +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: shelly-plug + labels: + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: shelly-plug + helm.sh/chart: shelly-plug-4.4.0 + annotations: + helm.sh/resource-policy: keep + namespace: shelly-plug +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "1Gi" + storageClassName: "ceph-block" +--- +# Source: shelly-plug/charts/shelly-plug/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: shelly-plug + labels: + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: shelly-plug + app.kubernetes.io/service: shelly-plug + helm.sh/chart: shelly-plug-4.4.0 + namespace: shelly-plug +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/name: shelly-plug +--- +# Source: shelly-plug/charts/shelly-plug/templates/common.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: shelly-plug + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: shelly-plug + helm.sh/chart: shelly-plug-4.4.0 + namespace: shelly-plug +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: 
Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: shelly-plug + app.kubernetes.io/instance: shelly-plug + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/name: shelly-plug + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + initContainers: + - command: + - /bin/sh + - -ec + - | + cd /var/www/html + if [ -d ".git" ]; then + echo "Git repository found. Pulling latest changes..." + git pull + else + echo "Not a git repository. Initializing ..." + git init + git remote add origin https://github.com/geerlingguy/shelly-plug-prometheus.git + git fetch origin + git checkout origin/master -ft + fi + image: alpine/git:latest + imagePullPolicy: IfNotPresent + name: init-fetch-repo + resources: + requests: + cpu: 10m + memory: 128Mi + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /var/www/html + name: script + containers: + - env: + - name: SHELLY_HOSTNAME + value: it05sp.alexlebens.net + - name: SHELLY_GENERATION + value: "2" + envFrom: + - secretRef: + name: shelly-plug-config-secret + image: php:8.4.15-apache-bookworm + imagePullPolicy: IfNotPresent + name: main + resources: + requests: + cpu: 10m + memory: 64Mi + volumeMounts: + - mountPath: /var/www/html + name: script + volumes: + - name: script + persistentVolumeClaim: + claimName: shelly-plug +--- +# Source: shelly-plug/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: shelly-plug-config-secret + namespace: shelly-plug + labels: + app.kubernetes.io/name: shelly-plug-config-secret + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/part-of: shelly-plug +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: SHELLY_HTTP_USERNAME + remoteRef: + 
conversionStrategy: Default + decodingStrategy: None + key: /shelly-plug/auth/it05sp + metadataPolicy: None + property: SHELLY_HTTP_USERNAME + - secretKey: SHELLY_HTTP_PASSWORD + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /shelly-plug/auth/it05sp + metadataPolicy: None + property: SHELLY_HTTP_PASSWORD +--- +# Source: shelly-plug/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: shelly-plug + namespace: shelly-plug + labels: + app.kubernetes.io/name: shelly-plug + app.kubernetes.io/instance: shelly-plug + app.kubernetes.io/part-of: shelly-plug +spec: + selector: + matchLabels: + app.kubernetes.io/name: shelly-plug + app.kubernetes.io/instance: shelly-plug + endpoints: + - port: metrics + interval: 30s + scrapeTimeout: 10s + path: /metrics diff --git a/clusters/cl01tl/manifests/trivy/trivy.yaml b/clusters/cl01tl/manifests/trivy/trivy.yaml new file mode 100644 index 000000000..8ba97f190 --- /dev/null +++ b/clusters/cl01tl/manifests/trivy/trivy.yaml @@ -0,0 +1,5222 @@ +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_clustercompliancereports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clustercompliancereports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ClusterComplianceReport + listKind: ClusterComplianceReportList + plural: clustercompliancereports + shortNames: + - compliance + singular: clustercompliancereport + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of checks that failed + jsonPath: .status.summary.failCount + name: Fail + priority: 1 + type: integer + - description: The number of checks that passed + jsonPath: .status.summary.passCount + name: Pass + 
priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterComplianceReport is a specification for the ClusterComplianceReport + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ReportSpec represent the compliance specification + properties: + compliance: + properties: + controls: + description: Control represent the cps controls data and mapping + checks + items: + description: Control represent the cps controls data and mapping + checks + properties: + checks: + items: + description: SpecCheck represent the scanner who perform + the control check + properties: + id: + description: id define the check id as produced by + scanner + type: string + required: + - id + type: object + type: array + commands: + items: + description: Commands represent the commands to be executed + by the node-collector + properties: + id: + description: id define the commands id + type: string + required: + - id + type: object + type: array + defaultStatus: + description: define the default value for check status in + case resource not found + enum: + - PASS + - WARN + - FAIL + type: string + description: + type: string + id: + description: id define the control check id + type: string + name: + type: string + severity: + description: define the 
severity of the control + enum: + - CRITICAL + - HIGH + - MEDIUM + - LOW + - UNKNOWN + type: string + required: + - id + - name + - severity + type: object + type: array + description: + type: string + id: + type: string + platform: + type: string + relatedResources: + items: + type: string + type: array + title: + type: string + type: + type: string + version: + type: string + required: + - controls + - description + - id + - platform + - relatedResources + - title + - type + - version + type: object + cron: + description: cron define the intervals for report generation + pattern: ^(((([\*]{1}){1})|((\*\/){0,1}(([0-9]{1}){1}|(([1-5]{1}){1}([0-9]{1}){1}){1}))) + ((([\*]{1}){1})|((\*\/){0,1}(([0-9]{1}){1}|(([1]{1}){1}([0-9]{1}){1}){1}|([2]{1}){1}([0-3]{1}){1}))) + ((([\*]{1}){1})|((\*\/){0,1}(([1-9]{1}){1}|(([1-2]{1}){1}([0-9]{1}){1}){1}|([3]{1}){1}([0-1]{1}){1}))) + ((([\*]{1}){1})|((\*\/){0,1}(([1-9]{1}){1}|(([1-2]{1}){1}([0-9]{1}){1}){1}|([3]{1}){1}([0-1]{1}){1}))|(jan|feb|mar|apr|may|jun|jul|aug|sep|okt|nov|dec)) + ((([\*]{1}){1})|((\*\/){0,1}(([0-7]{1}){1}))|(sun|mon|tue|wed|thu|fri|sat)))$ + type: string + reportType: + enum: + - summary + - all + type: string + required: + - compliance + - cron + - reportType + type: object + status: + properties: + detailReport: + description: ComplianceReport represents a kubernetes scan report + properties: + description: + type: string + id: + type: string + relatedVersion: + items: + type: string + type: array + results: + items: + properties: + checks: + items: + description: ComplianceCheck provides the result of conducting + a single compliance step. + properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links + to external resources to remediate failing check. 
+ type: string + severity: + description: Severity level of a vulnerability or + a configuration audit check. + type: string + success: + type: boolean + target: + type: string + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + description: + type: string + id: + type: string + name: + type: string + severity: + type: string + status: + type: string + required: + - checks + type: object + type: array + title: + type: string + version: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + summary: + properties: + failCount: + type: integer + passCount: + type: integer + type: object + summaryReport: + description: SummaryReport represents a kubernetes scan report with + consolidated findings + properties: + controlCheck: + items: + properties: + id: + type: string + name: + type: string + severity: + type: string + totalFail: + type: integer + type: object + type: array + id: + type: string + title: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + updateTimestamp: + format: date-time + type: string + required: + - updateTimestamp + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_clusterconfigauditreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusterconfigauditreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ClusterConfigAuditReport + listKind: ClusterConfigAuditReportList + plural: clusterconfigauditreports + shortNames: + - clusterconfigaudit + singular: clusterconfigauditreport + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of the config audit scanner + jsonPath: .report.scanner.name + name: Scanner + type: 
string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of failed checks with critical severity + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of failed checks with high severity + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of failed checks with medium severity + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of failed checks with low severity + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterConfigAuditReport is a specification for the ClusterConfigAuditReport + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + properties: + checks: + description: Checks provides results of conducting audit steps. + items: + description: Check provides the result of conducting a single audit + step. 
+ properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links to external + resources to remediate failing check. + type: string + scope: + description: Scope indicates the section of config that was + audited. + properties: + type: + description: Type indicates type of this scope, e.g. Container, + ConfigMapKey or JSONPath. + type: string + value: + description: Value indicates value of this scope that depends + on Type, e.g. container name, ConfigMap key or JSONPath + expression + type: string + required: + - type + - value + type: object + severity: + description: Severity level of a vulnerability or a configuration + audit check. + type: string + success: + type: boolean + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + scanner: + description: Scanner is the spec for a scanner generating a security + assessment report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: ConfigAuditSummary counts failed checks by severity. + properties: + criticalCount: + description: CriticalCount is the number of failed checks with + critical severity. + type: integer + highCount: + description: HighCount is the number of failed checks with high + severity. + type: integer + lowCount: + description: LowCount is the number of failed check with low severity. + type: integer + mediumCount: + description: MediumCount is the number of failed checks with medium + severity. 
+ type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + updateTimestamp: + format: date-time + type: string + required: + - checks + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_clusterinfraassessmentreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusterinfraassessmentreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ClusterInfraAssessmentReport + listKind: ClusterInfraAssessmentReportList + plural: clusterinfraassessmentreports + shortNames: + - clusterinfraassessment + singular: clusterinfraassessmentreport + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of the infra assessement scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of failed checks with critical severity + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of failed checks with high severity + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of failed checks with medium severity + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of failed checks with low severity + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterInfraAssessmentReport is a specification for the ClusterInfraAssessmentReport + resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + properties: + checks: + description: Checks provides results of conducting audit steps. + items: + description: Check provides the result of conducting a single audit + step. + properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links to external + resources to remediate failing check. + type: string + scope: + description: Scope indicates the section of config that was + audited. + properties: + type: + description: Type indicates type of this scope, e.g. Container, + ConfigMapKey or JSONPath. + type: string + value: + description: Value indicates value of this scope that depends + on Type, e.g. container name, ConfigMap key or JSONPath + expression + type: string + required: + - type + - value + type: object + severity: + description: Severity level of a vulnerability or a configuration + audit check. + type: string + success: + type: boolean + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + scanner: + description: Scanner is the spec for a scanner generating a security + assessment report. 
+ properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: InfraAssessmentSummary counts failed checks by severity. + properties: + criticalCount: + description: CriticalCount is the number of failed checks with + critical severity. + type: integer + highCount: + description: HighCount is the number of failed checks with high + severity. + type: integer + lowCount: + description: LowCount is the number of failed check with low severity. + type: integer + mediumCount: + description: MediumCount is the number of failed checks with medium + severity. + type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + required: + - checks + - scanner + - summary + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_clusterrbacassessmentreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusterrbacassessmentreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ClusterRbacAssessmentReport + listKind: ClusterRbacAssessmentReportList + plural: clusterrbacassessmentreports + shortNames: + - clusterrbacassessmentreport + singular: clusterrbacassessmentreport + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of the rbac assessment scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - 
description: The number of failed checks with critical severity + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of failed checks with high severity + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of failed checks with medium severity + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of failed checks with low severity + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterRbacAssessmentReport is a specification for the ClusterRbacAssessmentReport + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + properties: + checks: + description: Checks provides results of conducting audit steps. + items: + description: Check provides the result of conducting a single audit + step. + properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links to external + resources to remediate failing check. 
+ type: string + scope: + description: Scope indicates the section of config that was + audited. + properties: + type: + description: Type indicates type of this scope, e.g. Container, + ConfigMapKey or JSONPath. + type: string + value: + description: Value indicates value of this scope that depends + on Type, e.g. container name, ConfigMap key or JSONPath + expression + type: string + required: + - type + - value + type: object + severity: + description: Severity level of a vulnerability or a configuration + audit check. + type: string + success: + type: boolean + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + scanner: + description: Scanner is the spec for a scanner generating a security + assessment report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: RbacAssessmentSummary counts failed checks by severity. + properties: + criticalCount: + description: CriticalCount is the number of failed checks with + critical severity. + type: integer + highCount: + description: HighCount is the number of failed checks with high + severity. + type: integer + lowCount: + description: LowCount is the number of failed check with low severity. + type: integer + mediumCount: + description: MediumCount is the number of failed checks with medium + severity. 
+ type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + required: + - checks + - scanner + - summary + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_clustersbomreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clustersbomreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ClusterSbomReport + listKind: ClusterSbomReportList + plural: clustersbomreports + shortNames: + - clustersbom + singular: clustersbomreport + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of image repository + jsonPath: .report.artifact.repository + name: Repository + type: string + - description: The name of image tag + jsonPath: .report.artifact.tag + name: Tag + type: string + - description: The name of the sbom generation scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of dependencies in bom + jsonPath: .report.summary.componentsCount + name: Components + priority: 1 + type: integer + - description: The the number of components in bom + jsonPath: .report.summary.dependenciesCount + name: Dependencies + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterSbomReport summarizes components and dependencies found + in container image + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + description: Report is the actual sbom report data. + properties: + artifact: + description: |- + Artifact represents a standalone, executable package of software that includes everything needed to + run an application. + properties: + digest: + description: Digest is a unique and immutable identifier of an + Artifact. + type: string + mimeType: + description: MimeType represents a type and format of an Artifact. + type: string + repository: + description: Repository is the name of the repository in the Artifact + registry. + type: string + tag: + description: Tag is a mutable, human-readable string used to identify + an Artifact. + type: string + type: object + components: + description: Bom is artifact bill of materials. 
+ properties: + bomFormat: + type: string + components: + items: + properties: + bom-ref: + type: string + group: + type: string + hashes: + items: + properties: + alg: + type: string + content: + type: string + type: object + type: array + licenses: + items: + properties: + expression: + type: string + license: + properties: + id: + type: string + name: + type: string + url: + type: string + type: object + type: object + type: array + name: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + purl: + type: string + supplier: + properties: + contact: + items: + properties: + email: + type: string + name: + type: string + phone: + type: string + type: object + type: array + name: + type: string + url: + items: + type: string + type: array + type: object + type: + type: string + version: + type: string + type: object + type: array + dependencies: + items: + properties: + dependsOn: + items: + type: string + type: array + ref: + type: string + type: object + type: array + metadata: + properties: + component: + properties: + bom-ref: + type: string + group: + type: string + hashes: + items: + properties: + alg: + type: string + content: + type: string + type: object + type: array + licenses: + items: + properties: + expression: + type: string + license: + properties: + id: + type: string + name: + type: string + url: + type: string + type: object + type: object + type: array + name: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + purl: + type: string + supplier: + properties: + contact: + items: + properties: + email: + type: string + name: + type: string + phone: + type: string + type: object + type: array + name: + type: string + url: + items: + type: string + type: array + type: object + type: + type: string + version: + type: string + type: object + timestamp: + type: string + tools: + properties: + 
components: + items: + properties: + bom-ref: + type: string + group: + type: string + hashes: + items: + properties: + alg: + type: string + content: + type: string + type: object + type: array + licenses: + items: + properties: + expression: + type: string + license: + properties: + id: + type: string + name: + type: string + url: + type: string + type: object + type: object + type: array + name: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + purl: + type: string + supplier: + properties: + contact: + items: + properties: + email: + type: string + name: + type: string + phone: + type: string + type: object + type: array + name: + type: string + url: + items: + type: string + type: array + type: object + type: + type: string + version: + type: string + type: object + type: array + type: object + type: object + serialNumber: + type: string + specVersion: + type: string + version: + type: integer + required: + - bomFormat + - specVersion + type: object + registry: + description: Registry is the registry the Artifact was pulled from. + properties: + server: + description: Server the FQDN of registry server. + type: string + type: object + scanner: + description: Scanner is the scanner that generated this report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: Summary is a summary of sbom report. + properties: + componentsCount: + description: ComponentsCount is the number of components in bom. + minimum: 0 + type: integer + dependenciesCount: + description: DependenciesCount is the number of dependencies in + bom. 
+ minimum: 0 + type: integer + required: + - componentsCount + - dependenciesCount + type: object + updateTimestamp: + description: UpdateTimestamp is a timestamp representing the server + time in UTC when this report was updated. + format: date-time + type: string + required: + - artifact + - components + - scanner + - summary + - updateTimestamp + type: object + required: + - report + type: object + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_clustervulnerabilityreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clustervulnerabilityreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ClusterVulnerabilityReport + listKind: ClusterVulnerabilityReportList + plural: clustervulnerabilityreports + shortNames: + - clustervuln + singular: clustervulnerabilityreport + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of image repository + jsonPath: .report.artifact.repository + name: Repository + type: string + - description: The name of image tag + jsonPath: .report.artifact.tag + name: Tag + type: string + - description: The name of the vulnerability scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of critical vulnerabilities + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of high vulnerabilities + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of medium vulnerabilities + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of low vulnerabilities + jsonPath: 
.report.summary.lowCount + name: Low + priority: 1 + type: integer + - description: The number of unknown vulnerabilities + jsonPath: .report.summary.unknownCount + name: Unknown + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ClusterVulnerabilityReport summarizes vulnerabilities in application dependencies and operating system packages + built into container images. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + description: Report is the actual vulnerability report data. + properties: + artifact: + description: |- + Artifact represents a standalone, executable package of software that includes everything needed to + run an application. + properties: + digest: + description: Digest is a unique and immutable identifier of an + Artifact. + type: string + mimeType: + description: MimeType represents a type and format of an Artifact. + type: string + repository: + description: Repository is the name of the repository in the Artifact + registry. + type: string + tag: + description: Tag is a mutable, human-readable string used to identify + an Artifact. 
+ type: string + type: object + os: + description: OS information of the artifact + properties: + eosl: + description: Eosl is true if OS version has reached end of service + life + type: boolean + family: + description: Operating System Family + type: string + name: + description: Name or version of the OS + type: string + type: object + registry: + description: Registry is the registry the Artifact was pulled from. + properties: + server: + description: Server the FQDN of registry server. + type: string + type: object + scanner: + description: Scanner is the scanner that generated this report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: Summary is a summary of Vulnerability counts grouped + by Severity. + properties: + criticalCount: + description: CriticalCount is the number of vulnerabilities with + Critical Severity. + minimum: 0 + type: integer + highCount: + description: HighCount is the number of vulnerabilities with High + Severity. + minimum: 0 + type: integer + lowCount: + description: LowCount is the number of vulnerabilities with Low + Severity. + minimum: 0 + type: integer + mediumCount: + description: MediumCount is the number of vulnerabilities with + Medium Severity. + minimum: 0 + type: integer + noneCount: + description: NoneCount is the number of packages without any vulnerability. + minimum: 0 + type: integer + unknownCount: + description: UnknownCount is the number of vulnerabilities with + unknown severity. 
+ minimum: 0 + type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + - unknownCount + type: object + updateTimestamp: + description: UpdateTimestamp is a timestamp representing the server + time in UTC when this report was updated. + format: date-time + type: string + vulnerabilities: + description: Vulnerabilities is a list of operating system (OS) or + application software Vulnerability items found in the Artifact. + items: + description: Vulnerability is the spec for a vulnerability record. + properties: + class: + type: string + cvss: + additionalProperties: + properties: + V2Score: + type: number + V2Vector: + type: string + V3Score: + type: number + V3Vector: + type: string + V40Score: + type: number + V40Vector: + type: string + type: object + type: object + cvsssource: + type: string + description: + type: string + fixedVersion: + description: FixedVersion indicates the version of the Resource + in which this vulnerability has been fixed. + type: string + installedVersion: + description: InstalledVersion indicates the installed version + of the Resource. + type: string + lastModifiedDate: + description: LastModifiedDate indicates the last date CVE has + been modified. + type: string + links: + items: + type: string + type: array + packagePURL: + type: string + packagePath: + type: string + packageType: + type: string + primaryLink: + type: string + publishedDate: + description: PublishedDate indicates the date of published CVE. + type: string + resource: + description: Resource is a vulnerable package, application, + or library. + type: string + score: + type: number + severity: + description: Severity level of a vulnerability or a configuration + audit check. + enum: + - CRITICAL + - HIGH + - MEDIUM + - LOW + - UNKNOWN + type: string + target: + type: string + title: + type: string + vulnerabilityID: + description: VulnerabilityID the vulnerability identifier. 
+ type: string + required: + - fixedVersion + - installedVersion + - lastModifiedDate + - publishedDate + - resource + - severity + - title + - vulnerabilityID + type: object + type: array + required: + - artifact + - os + - scanner + - summary + - updateTimestamp + - vulnerabilities + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_configauditreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: configauditreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ConfigAuditReport + listKind: ConfigAuditReportList + plural: configauditreports + shortNames: + - configaudit + - configaudits + singular: configauditreport + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of the config audit scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of failed checks with critical severity + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of failed checks with high severity + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of failed checks with medium severity + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of failed checks with low severity + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ConfigAuditReport is a specification for the ConfigAuditReport + resource. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + properties: + checks: + description: Checks provides results of conducting audit steps. + items: + description: Check provides the result of conducting a single audit + step. + properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links to external + resources to remediate failing check. + type: string + scope: + description: Scope indicates the section of config that was + audited. + properties: + type: + description: Type indicates type of this scope, e.g. Container, + ConfigMapKey or JSONPath. + type: string + value: + description: Value indicates value of this scope that depends + on Type, e.g. container name, ConfigMap key or JSONPath + expression + type: string + required: + - type + - value + type: object + severity: + description: Severity level of a vulnerability or a configuration + audit check. + type: string + success: + type: boolean + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + scanner: + description: Scanner is the spec for a scanner generating a security + assessment report. 
+ properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: ConfigAuditSummary counts failed checks by severity. + properties: + criticalCount: + description: CriticalCount is the number of failed checks with + critical severity. + type: integer + highCount: + description: HighCount is the number of failed checks with high + severity. + type: integer + lowCount: + description: LowCount is the number of failed check with low severity. + type: integer + mediumCount: + description: MediumCount is the number of failed checks with medium + severity. + type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + updateTimestamp: + format: date-time + type: string + required: + - checks + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_exposedsecretreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: exposedsecretreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: ExposedSecretReport + listKind: ExposedSecretReportList + plural: exposedsecretreports + shortNames: + - exposedsecret + - exposedsecrets + singular: exposedsecretreport + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of image repository + jsonPath: .report.artifact.repository + name: Repository + type: string + - description: The name of image tag + jsonPath: .report.artifact.tag + name: Tag + type: string + - description: The name of the 
exposed secret scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of critical exposed secrets + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of high exposed secrets + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of medium exposed secrets + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of low exposed secrets + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ExposedSecretReport summarizes exposed secrets in plaintext files + built into container images. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + description: Report is the actual exposed secret report data. + properties: + artifact: + description: |- + Artifact represents a standalone, executable package of software that includes everything needed to + run an application. + properties: + digest: + description: Digest is a unique and immutable identifier of an + Artifact. 
+ type: string + mimeType: + description: MimeType represents a type and format of an Artifact. + type: string + repository: + description: Repository is the name of the repository in the Artifact + registry. + type: string + tag: + description: Tag is a mutable, human-readable string used to identify + an Artifact. + type: string + type: object + registry: + description: Registry is the registry the Artifact was pulled from. + properties: + server: + description: Server the FQDN of registry server. + type: string + type: object + scanner: + description: Scanner is the scanner that generated this report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + secrets: + description: Exposed secrets is a list of passwords, api keys, tokens + and others items found in the Artifact. + items: + description: ExposedSecret is the spec for a exposed secret record. + properties: + category: + type: string + match: + description: Match where the exposed rule matched. + type: string + ruleID: + description: RuleID is rule the identifier. + type: string + severity: + description: Severity level of a vulnerability or a configuration + audit check. + enum: + - CRITICAL + - HIGH + - MEDIUM + - LOW + type: string + target: + description: Target is where the exposed secret was found. + type: string + title: + type: string + required: + - category + - match + - ruleID + - severity + - target + - title + type: object + type: array + summary: + description: Summary is the exposed secrets counts grouped by Severity. + properties: + criticalCount: + description: CriticalCount is the number of exposed secrets with + Critical Severity. 
+ minimum: 0 + type: integer + highCount: + description: HighCount is the number of exposed secrets with High + Severity. + minimum: 0 + type: integer + lowCount: + description: LowCount is the number of exposed secrets with Low + Severity. + minimum: 0 + type: integer + mediumCount: + description: MediumCount is the number of exposed secrets with + Medium Severity. + minimum: 0 + type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + updateTimestamp: + description: UpdateTimestamp is a timestamp representing the server + time in UTC when this report was updated. + format: date-time + type: string + required: + - artifact + - scanner + - secrets + - summary + - updateTimestamp + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_infraassessmentreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: infraassessmentreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: InfraAssessmentReport + listKind: InfraAssessmentReportList + plural: infraassessmentreports + shortNames: + - infraassessment + - infraassessments + singular: infraassessmentreport + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of the infra assessment scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of failed checks with critical severity + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of failed checks with high severity + jsonPath: .report.summary.highCount + name: High + priority: 1 
+ type: integer + - description: The number of failed checks with medium severity + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of failed checks with low severity + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: InfraAssessmentReport is a specification for the InfraAssessmentReport + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + properties: + checks: + description: Checks provides results of conducting audit steps. + items: + description: Check provides the result of conducting a single audit + step. + properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links to external + resources to remediate failing check. + type: string + scope: + description: Scope indicates the section of config that was + audited. + properties: + type: + description: Type indicates type of this scope, e.g. Container, + ConfigMapKey or JSONPath. + type: string + value: + description: Value indicates value of this scope that depends + on Type, e.g. 
container name, ConfigMap key or JSONPath + expression + type: string + required: + - type + - value + type: object + severity: + description: Severity level of a vulnerability or a configuration + audit check. + type: string + success: + type: boolean + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + scanner: + description: Scanner is the spec for a scanner generating a security + assessment report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: InfraAssessmentSummary counts failed checks by severity. + properties: + criticalCount: + description: CriticalCount is the number of failed checks with + critical severity. + type: integer + highCount: + description: HighCount is the number of failed checks with high + severity. + type: integer + lowCount: + description: LowCount is the number of failed check with low severity. + type: integer + mediumCount: + description: MediumCount is the number of failed checks with medium + severity. 
+ type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + required: + - checks + - scanner + - summary + type: object + required: + - report + type: object + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_rbacassessmentreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: rbacassessmentreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: RbacAssessmentReport + listKind: RbacAssessmentReportList + plural: rbacassessmentreports + shortNames: + - rbacassessment + - rbacassessments + singular: rbacassessmentreport + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of the rbac assessment scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of failed checks with critical severity + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of failed checks with high severity + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of failed checks with medium severity + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of failed checks with low severity + jsonPath: .report.summary.lowCount + name: Low + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: RbacAssessmentReport is a specification for the RbacAssessmentReport + resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + properties: + checks: + description: Checks provides results of conducting audit steps. + items: + description: Check provides the result of conducting a single audit + step. + properties: + category: + type: string + checkID: + type: string + description: + type: string + messages: + items: + type: string + type: array + remediation: + description: Remediation provides description or links to external + resources to remediate failing check. + type: string + scope: + description: Scope indicates the section of config that was + audited. + properties: + type: + description: Type indicates type of this scope, e.g. Container, + ConfigMapKey or JSONPath. + type: string + value: + description: Value indicates value of this scope that depends + on Type, e.g. container name, ConfigMap key or JSONPath + expression + type: string + required: + - type + - value + type: object + severity: + description: Severity level of a vulnerability or a configuration + audit check. + type: string + success: + type: boolean + title: + type: string + required: + - checkID + - severity + - success + type: object + type: array + scanner: + description: Scanner is the spec for a scanner generating a security + assessment report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. 
+ type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: RbacAssessmentSummary counts failed checks by severity. + properties: + criticalCount: + description: CriticalCount is the number of failed checks with + critical severity. + type: integer + highCount: + description: HighCount is the number of failed checks with high + severity. + type: integer + lowCount: + description: LowCount is the number of failed check with low severity. + type: integer + mediumCount: + description: MediumCount is the number of failed checks with medium + severity. + type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + type: object + required: + - checks + - scanner + - summary + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_sbomreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: sbomreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: SbomReport + listKind: SbomReportList + plural: sbomreports + shortNames: + - sbom + - sboms + singular: sbomreport + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of image repository + jsonPath: .report.artifact.repository + name: Repository + type: string + - description: The name of image tag + jsonPath: .report.artifact.tag + name: Tag + type: string + - description: The name of the sbom generation scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of dependencies in bom + jsonPath: 
.report.summary.componentsCount + name: Components + priority: 1 + type: integer + - description: The the number of components in bom + jsonPath: .report.summary.dependenciesCount + name: Dependencies + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: SbomReport summarizes components and dependencies found in container + image + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + description: Report is the actual sbom report data. + properties: + artifact: + description: |- + Artifact represents a standalone, executable package of software that includes everything needed to + run an application. + properties: + digest: + description: Digest is a unique and immutable identifier of an + Artifact. + type: string + mimeType: + description: MimeType represents a type and format of an Artifact. + type: string + repository: + description: Repository is the name of the repository in the Artifact + registry. + type: string + tag: + description: Tag is a mutable, human-readable string used to identify + an Artifact. + type: string + type: object + components: + description: Bom is artifact bill of materials. 
+ properties: + bomFormat: + type: string + components: + items: + properties: + bom-ref: + type: string + group: + type: string + hashes: + items: + properties: + alg: + type: string + content: + type: string + type: object + type: array + licenses: + items: + properties: + expression: + type: string + license: + properties: + id: + type: string + name: + type: string + url: + type: string + type: object + type: object + type: array + name: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + purl: + type: string + supplier: + properties: + contact: + items: + properties: + email: + type: string + name: + type: string + phone: + type: string + type: object + type: array + name: + type: string + url: + items: + type: string + type: array + type: object + type: + type: string + version: + type: string + type: object + type: array + dependencies: + items: + properties: + dependsOn: + items: + type: string + type: array + ref: + type: string + type: object + type: array + metadata: + properties: + component: + properties: + bom-ref: + type: string + group: + type: string + hashes: + items: + properties: + alg: + type: string + content: + type: string + type: object + type: array + licenses: + items: + properties: + expression: + type: string + license: + properties: + id: + type: string + name: + type: string + url: + type: string + type: object + type: object + type: array + name: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + purl: + type: string + supplier: + properties: + contact: + items: + properties: + email: + type: string + name: + type: string + phone: + type: string + type: object + type: array + name: + type: string + url: + items: + type: string + type: array + type: object + type: + type: string + version: + type: string + type: object + timestamp: + type: string + tools: + properties: + 
components: + items: + properties: + bom-ref: + type: string + group: + type: string + hashes: + items: + properties: + alg: + type: string + content: + type: string + type: object + type: array + licenses: + items: + properties: + expression: + type: string + license: + properties: + id: + type: string + name: + type: string + url: + type: string + type: object + type: object + type: array + name: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + purl: + type: string + supplier: + properties: + contact: + items: + properties: + email: + type: string + name: + type: string + phone: + type: string + type: object + type: array + name: + type: string + url: + items: + type: string + type: array + type: object + type: + type: string + version: + type: string + type: object + type: array + type: object + type: object + serialNumber: + type: string + specVersion: + type: string + version: + type: integer + required: + - bomFormat + - specVersion + type: object + registry: + description: Registry is the registry the Artifact was pulled from. + properties: + server: + description: Server the FQDN of registry server. + type: string + type: object + scanner: + description: Scanner is the scanner that generated this report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: Summary is a summary of sbom report. + properties: + componentsCount: + description: ComponentsCount is the number of components in bom. + minimum: 0 + type: integer + dependenciesCount: + description: DependenciesCount is the number of dependencies in + bom. 
+ minimum: 0 + type: integer + required: + - componentsCount + - dependenciesCount + type: object + updateTimestamp: + description: UpdateTimestamp is a timestamp representing the server + time in UTC when this report was updated. + format: date-time + type: string + required: + - artifact + - components + - scanner + - summary + - updateTimestamp + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/charts/trivy-operator/crds/aquasecurity.github.io_vulnerabilityreports.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: vulnerabilityreports.aquasecurity.github.io +spec: + group: aquasecurity.github.io + names: + kind: VulnerabilityReport + listKind: VulnerabilityReportList + plural: vulnerabilityreports + shortNames: + - vuln + - vulns + singular: vulnerabilityreport + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of image repository + jsonPath: .report.artifact.repository + name: Repository + type: string + - description: The name of image tag + jsonPath: .report.artifact.tag + name: Tag + type: string + - description: The name of the vulnerability scanner + jsonPath: .report.scanner.name + name: Scanner + type: string + - description: The age of the report + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: The number of critical vulnerabilities + jsonPath: .report.summary.criticalCount + name: Critical + priority: 1 + type: integer + - description: The number of high vulnerabilities + jsonPath: .report.summary.highCount + name: High + priority: 1 + type: integer + - description: The number of medium vulnerabilities + jsonPath: .report.summary.mediumCount + name: Medium + priority: 1 + type: integer + - description: The number of low vulnerabilities + jsonPath: 
.report.summary.lowCount + name: Low + priority: 1 + type: integer + - description: The number of unknown vulnerabilities + jsonPath: .report.summary.unknownCount + name: Unknown + priority: 1 + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + VulnerabilityReport summarizes vulnerabilities in application dependencies and operating system packages + built into container images. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + report: + description: Report is the actual vulnerability report data. + properties: + artifact: + description: |- + Artifact represents a standalone, executable package of software that includes everything needed to + run an application. + properties: + digest: + description: Digest is a unique and immutable identifier of an + Artifact. + type: string + mimeType: + description: MimeType represents a type and format of an Artifact. + type: string + repository: + description: Repository is the name of the repository in the Artifact + registry. + type: string + tag: + description: Tag is a mutable, human-readable string used to identify + an Artifact. 
+ type: string + type: object + os: + description: OS information of the artifact + properties: + eosl: + description: Eosl is true if OS version has reached end of service + life + type: boolean + family: + description: Operating System Family + type: string + name: + description: Name or version of the OS + type: string + type: object + registry: + description: Registry is the registry the Artifact was pulled from. + properties: + server: + description: Server the FQDN of registry server. + type: string + type: object + scanner: + description: Scanner is the scanner that generated this report. + properties: + name: + description: Name the name of the scanner. + type: string + vendor: + description: Vendor the name of the vendor providing the scanner. + type: string + version: + description: Version the version of the scanner. + type: string + required: + - name + - vendor + - version + type: object + summary: + description: Summary is a summary of Vulnerability counts grouped + by Severity. + properties: + criticalCount: + description: CriticalCount is the number of vulnerabilities with + Critical Severity. + minimum: 0 + type: integer + highCount: + description: HighCount is the number of vulnerabilities with High + Severity. + minimum: 0 + type: integer + lowCount: + description: LowCount is the number of vulnerabilities with Low + Severity. + minimum: 0 + type: integer + mediumCount: + description: MediumCount is the number of vulnerabilities with + Medium Severity. + minimum: 0 + type: integer + noneCount: + description: NoneCount is the number of packages without any vulnerability. + minimum: 0 + type: integer + unknownCount: + description: UnknownCount is the number of vulnerabilities with + unknown severity. 
+ minimum: 0 + type: integer + required: + - criticalCount + - highCount + - lowCount + - mediumCount + - unknownCount + type: object + updateTimestamp: + description: UpdateTimestamp is a timestamp representing the server + time in UTC when this report was updated. + format: date-time + type: string + vulnerabilities: + description: Vulnerabilities is a list of operating system (OS) or + application software Vulnerability items found in the Artifact. + items: + description: Vulnerability is the spec for a vulnerability record. + properties: + class: + type: string + cvss: + additionalProperties: + properties: + V2Score: + type: number + V2Vector: + type: string + V3Score: + type: number + V3Vector: + type: string + V40Score: + type: number + V40Vector: + type: string + type: object + type: object + cvsssource: + type: string + description: + type: string + fixedVersion: + description: FixedVersion indicates the version of the Resource + in which this vulnerability has been fixed. + type: string + installedVersion: + description: InstalledVersion indicates the installed version + of the Resource. + type: string + lastModifiedDate: + description: LastModifiedDate indicates the last date CVE has + been modified. + type: string + links: + items: + type: string + type: array + packagePURL: + type: string + packagePath: + type: string + packageType: + type: string + primaryLink: + type: string + publishedDate: + description: PublishedDate indicates the date of published CVE. + type: string + resource: + description: Resource is a vulnerable package, application, + or library. + type: string + score: + type: number + severity: + description: Severity level of a vulnerability or a configuration + audit check. + enum: + - CRITICAL + - HIGH + - MEDIUM + - LOW + - UNKNOWN + type: string + target: + type: string + title: + type: string + vulnerabilityID: + description: VulnerabilityID the vulnerability identifier. 
+ type: string + required: + - fixedVersion + - installedVersion + - lastModifiedDate + - publishedDate + - resource + - severity + - title + - vulnerabilityID + type: object + type: array + required: + - artifact + - os + - scanner + - summary + - updateTimestamp + - vulnerabilities + type: object + required: + - report + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: {} + +--- +# Source: trivy/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: trivy + labels: + app.kubernetes.io/name: trivy + app.kubernetes.io/instance: trivy + app.kubernetes.io/part-of: trivy + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/warn: privileged +--- +# Source: trivy/charts/trivy-operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: trivy-trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +--- +# Source: trivy/charts/trivy-operator/templates/secrets/operator.yaml +apiVersion: v1 +kind: Secret +metadata: + name: trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +data: +--- +# Source: trivy/charts/trivy-operator/templates/secrets/trivy.yaml +apiVersion: v1 +kind: Secret +metadata: + name: trivy-operator-trivy-config + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +data: +--- +# Source: trivy/charts/trivy-operator/templates/configmaps/operator.yaml 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +data: + nodeCollector.tolerations: "[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/control-plane\",\"operator\":\"Exists\"}]" + nodeCollector.volumes: "[{\"hostPath\":{\"path\":\"/var/lib/etcd\"},\"name\":\"var-lib-etcd\"},{\"hostPath\":{\"path\":\"/var/lib/kubelet\"},\"name\":\"var-lib-kubelet\"},{\"hostPath\":{\"path\":\"/var/lib/kube-scheduler\"},\"name\":\"var-lib-kube-scheduler\"},{\"hostPath\":{\"path\":\"/var/lib/kube-controller-manager\"},\"name\":\"var-lib-kube-controller-manager\"},{\"hostPath\":{\"path\":\"/etc/kubernetes\"},\"name\":\"etc-kubernetes\"},{\"hostPath\":{\"path\":\"/etc/cni/net.d/\"},\"name\":\"etc-cni-netd\"}]" + nodeCollector.volumeMounts: "[{\"mountPath\":\"/var/lib/etcd\",\"name\":\"var-lib-etcd\",\"readOnly\":true},{\"mountPath\":\"/var/lib/kubelet\",\"name\":\"var-lib-kubelet\",\"readOnly\":true},{\"mountPath\":\"/var/lib/kube-scheduler\",\"name\":\"var-lib-kube-scheduler\",\"readOnly\":true},{\"mountPath\":\"/var/lib/kube-controller-manager\",\"name\":\"var-lib-kube-controller-manager\",\"readOnly\":true},{\"mountPath\":\"/etc/kubernetes\",\"name\":\"etc-kubernetes\",\"readOnly\":true},{\"mountPath\":\"/etc/cni/net.d/\",\"name\":\"etc-cni-netd\",\"readOnly\":true}]" + scanJob.useGCRServiceAccount: "true" + scanJob.podTemplateContainerSecurityContext: "{\"allowPrivilegeEscalation\":false,\"capabilities\":{\"drop\":[\"ALL\"]},\"privileged\":false,\"readOnlyRootFilesystem\":true}" + scanJob.compressLogs: "true" + vulnerabilityReports.scanner: "Trivy" + vulnerabilityReports.scanJobsInSameNamespace: "false" + configAuditReports.scanner: "Trivy" + report.recordFailedChecksOnly: "true" + node.collector.imageRef: 
"ghcr.io/aquasecurity/node-collector:0.3.1" + policies.bundle.oci.ref: "mirror.gcr.io/aquasec/trivy-checks:1" + policies.bundle.insecure: "false" + + node.collector.nodeSelector: "true" +--- +# Source: trivy/charts/trivy-operator/templates/configmaps/trivy-operator-config.yaml +kind: ConfigMap +apiVersion: v1 +metadata: + name: trivy-operator-config + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +data: + OPERATOR_LOG_DEV_MODE: "false" + OPERATOR_SCAN_JOB_TTL: "" + OPERATOR_SCAN_JOB_TIMEOUT: "5m" + OPERATOR_CONCURRENT_SCAN_JOBS_LIMIT: "10" + OPERATOR_CONCURRENT_NODE_COLLECTOR_LIMIT: "1" + OPERATOR_SCAN_JOB_RETRY_AFTER: "30s" + OPERATOR_BATCH_DELETE_LIMIT: "10" + OPERATOR_BATCH_DELETE_DELAY: "10s" + OPERATOR_METRICS_BIND_ADDRESS: ":8080" + OPERATOR_METRICS_FINDINGS_ENABLED: "true" + OPERATOR_METRICS_VULN_ID_ENABLED: "false" + OPERATOR_HEALTH_PROBE_BIND_ADDRESS: ":9090" + OPERATOR_PPROF_BIND_ADDRESS: "" + OPERATOR_VULNERABILITY_SCANNER_ENABLED: "false" + OPERATOR_SBOM_GENERATION_ENABLED: "false" + OPERATOR_CLUSTER_SBOM_CACHE_ENABLED: "false" + OPERATOR_VULNERABILITY_SCANNER_SCAN_ONLY_CURRENT_REVISIONS: "true" + OPERATOR_SCANNER_REPORT_TTL: "24h" + OPERATOR_CACHE_REPORT_TTL: "120h" + CONTROLLER_CACHE_SYNC_TIMEOUT: "5m" + OPERATOR_CONFIG_AUDIT_SCANNER_ENABLED: "true" + OPERATOR_RBAC_ASSESSMENT_SCANNER_ENABLED: "true" + OPERATOR_INFRA_ASSESSMENT_SCANNER_ENABLED: "false" + OPERATOR_CONFIG_AUDIT_SCANNER_SCAN_ONLY_CURRENT_REVISIONS: "true" + OPERATOR_EXPOSED_SECRET_SCANNER_ENABLED: "true" + OPERATOR_METRICS_EXPOSED_SECRET_INFO_ENABLED: "false" + OPERATOR_METRICS_CONFIG_AUDIT_INFO_ENABLED: "false" + OPERATOR_METRICS_RBAC_ASSESSMENT_INFO_ENABLED: "false" + OPERATOR_METRICS_INFRA_ASSESSMENT_INFO_ENABLED: "false" + OPERATOR_METRICS_IMAGE_INFO_ENABLED: "false" + 
OPERATOR_METRICS_CLUSTER_COMPLIANCE_INFO_ENABLED: "false" + OPERATOR_WEBHOOK_BROADCAST_URL: "" + OPERATOR_WEBHOOK_BROADCAST_TIMEOUT: "30s" + OPERATOR_WEBHOOK_BROADCAST_CUSTOM_HEADERS: "" + OPERATOR_SEND_DELETED_REPORTS: "false" + OPERATOR_PRIVATE_REGISTRY_SCAN_SECRETS_NAMES: "{}" + OPERATOR_ACCESS_GLOBAL_SECRETS_SERVICE_ACCOUNTS: "true" + OPERATOR_BUILT_IN_TRIVY_SERVER: "false" + TRIVY_SERVER_HEALTH_CHECK_CACHE_EXPIRATION: "10h" + OPERATOR_MERGE_RBAC_FINDING_WITH_CONFIG_AUDIT: "false" + OPERATOR_CLUSTER_COMPLIANCE_ENABLED: "false" +--- +# Source: trivy/charts/trivy-operator/templates/configmaps/trivy.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: trivy-operator-trivy-config + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +data: + trivy.repository: "mirror.gcr.io/aquasec/trivy" + trivy.tag: "0.67.2" + trivy.imagePullPolicy: "IfNotPresent" + trivy.additionalVulnerabilityReportFields: "" + trivy.registry.mirror.gcr.io: "proxy-gcr.io" + trivy.registry.mirror.ghcr.io: "proxy-ghcr.io" + trivy.registry.mirror.hub.docker: "proxy-hub.docker" + trivy.registry.mirror.quay.io: "proxy-quay.io" + trivy.registry.mirror.registry-1.docker.io: "proxy-registry-1.docker.io" + trivy.registry.mirror.registry.k8s.io: "proxy-registry.k8s" + trivy.severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL" + trivy.slow: "true" + trivy.skipJavaDBUpdate: "false" + trivy.includeDevDeps: "false" + trivy.imageScanCacheDir: "/tmp/trivy/.cache" + trivy.filesystemScanCacheDir: "/var/trivyoperator/trivy-db" + trivy.dbRepository: "mirror.gcr.io/aquasec/trivy-db" + trivy.javaDbRepository: "mirror.gcr.io/aquasec/trivy-java-db" + trivy.command: "image" + trivy.sbomSources: "" + trivy.dbRepositoryInsecure: "false" + trivy.useBuiltinRegoPolicies: "false" + trivy.useEmbeddedRegoPolicies: "true" + trivy.supportedConfigAuditKinds: 
"Workload,Service,Role,ClusterRole,NetworkPolicy,Ingress,LimitRange,ResourceQuota" + trivy.timeout: "5m0s" + trivy.mode: "Standalone" + trivy.resources.requests.cpu: "100m" + trivy.resources.requests.memory: "128M" + trivy.resources.limits.cpu: "500m" + trivy.resources.limits.memory: "500M" +--- +# Source: trivy/charts/trivy-operator/templates/rbac/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: trivy-operator +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - limitranges + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list +- apiGroups: + - "" + resources: + - replicationcontrollers + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - resourcequotas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - apps.openshift.io + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - clustercompliancedetailreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + 
resources: + - clustercompliancereports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - clustercompliancereports/status + verbs: + - get + - patch + - update +- apiGroups: + - aquasecurity.github.io + resources: + - clusterconfigauditreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - clusterinfraassessmentreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - clusterrbacassessmentreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - clustersbomreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - clustervulnerabilityreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - configauditreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - exposedsecretreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - infraassessmentreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - rbacassessmentreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - sbomreports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - aquasecurity.github.io + resources: + - vulnerabilityreports + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get +- apiGroups: + - "" + resources: + - nodes/proxy + verbs: + - get +--- +# Source: trivy/charts/trivy-operator/templates/rbac/view-configauditreports-clusterrole.yaml +# permissions for end users to view configauditreports +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: aggregate-config-audit-reports-view + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" +rules: + - apiGroups: + - aquasecurity.github.io + resources: + - configauditreports + verbs: + - get + - list + - watch +--- +# Source: trivy/charts/trivy-operator/templates/rbac/view-exposedsecretreports-clusterrole.yaml +# 
permissions for end users to view exposedsecretreports +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: aggregate-exposed-secret-reports-view + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" +rules: + - apiGroups: + - aquasecurity.github.io + resources: + - exposedsecretreports + verbs: + - get + - list + - watch +--- +# Source: trivy/charts/trivy-operator/templates/rbac/view-vulnerabilityreports-clusterrole.yaml +# permissions for end users to view vulnerabilityreports +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: aggregate-vulnerability-reports-view + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" +rules: + - apiGroups: + - aquasecurity.github.io + resources: + - vulnerabilityreports + verbs: + - get + - list + - watch +--- +# Source: trivy/charts/trivy-operator/templates/rbac/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: trivy-trivy-operator + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + 
kind: ClusterRole + name: trivy-operator +subjects: + - kind: ServiceAccount + name: trivy-trivy-operator + namespace: trivy +--- +# Source: trivy/charts/trivy-operator/templates/rbac/leader-election-role.yaml +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: trivy-trivy-operator-leader-election + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create +--- +# Source: trivy/charts/trivy-operator/templates/rbac/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: trivy-trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - delete + - update +--- +# Source: trivy/charts/trivy-operator/templates/rbac/leader-election-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: trivy-trivy-operator-leader-election + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: trivy-trivy-operator-leader-election +subjects: + - kind: ServiceAccount + name: trivy-trivy-operator + namespace: trivy +--- +# Source: 
trivy/charts/trivy-operator/templates/rbac/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: trivy-trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: trivy-trivy-operator +subjects: + - kind: ServiceAccount + name: trivy-trivy-operator + namespace: trivy +--- +# Source: trivy/charts/trivy-operator/templates/monitor/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: trivy-trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +spec: + clusterIP: None + ports: + - name: metrics + port: 80 + targetPort: metrics + protocol: TCP + appProtocol: TCP + selector: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + type: ClusterIP +--- +# Source: trivy/charts/trivy-operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: trivy-trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + template: + metadata: + labels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + spec: + serviceAccountName: trivy-trivy-operator + automountServiceAccountToken: true + containers: + - name: "trivy-operator" + image: "mirror.gcr.io/aquasec/trivy-operator:0.29.0" + imagePullPolicy: IfNotPresent + 
env: + - name: OPERATOR_NAMESPACE + value: trivy + - name: OPERATOR_TARGET_NAMESPACES + value: "" + - name: OPERATOR_EXCLUDE_NAMESPACES + value: "" + - name: OPERATOR_TARGET_WORKLOADS + value: "pod,replicaset,replicationcontroller,statefulset,daemonset,cronjob,job" + - name: OPERATOR_SERVICE_ACCOUNT + value: "trivy-trivy-operator" + envFrom: + - configMapRef: + name: trivy-operator-config + ports: + - name: metrics + containerPort: 8080 + - name: probes + containerPort: 9090 + readinessProbe: + httpGet: + path: /readyz/ + port: probes + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + httpGet: + path: /healthz/ + port: probes + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 10 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: cache-policies + readOnly: false + volumes: + - emptyDir: {} + name: cache-policies +--- +# Source: trivy/charts/trivy-operator/templates/specs/k8s-cis-1.23.yaml +apiVersion: aquasecurity.github.io/v1alpha1 +kind: ClusterComplianceReport +metadata: + name: k8s-cis-1.23 + labels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy-operator + app.kubernetes.io/version: 0.29.0 + app.kubernetes.io/managed-by: kubectl +spec: + cron: "0 5 * * *" + reportType: "summary" + compliance: + id: k8s-cis-1.23 + title: CIS Kubernetes Benchmarks v1.23 + description: CIS Kubernetes Benchmarks + platform: k8s + type: cis + relatedResources: + - https://www.cisecurity.org/benchmark/kubernetes + version: "1.23" + controls: + - id: 1.1.1 + name: Ensure that the API server pod specification file permissions are set to + 600 or more restrictive + description: Ensure that the API server pod specification file has permissions + of 600 or more restrictive + checks: + - 
id: AVD-KCV-0048 + commands: + - id: CMD-0001 + severity: HIGH + - id: 1.1.2 + name: Ensure that the API server pod specification file ownership is set to + root:root + description: Ensure that the API server pod specification file ownership is set + to root:root + checks: + - id: AVD-KCV-0049 + commands: + - id: CMD-0002 + severity: HIGH + - id: 1.1.3 + name: Ensure that the controller manager pod specification file permissions are + set to 600 or more restrictive + description: Ensure that the controller manager pod specification file has + permissions of 600 or more restrictive + checks: + - id: AVD-KCV-0050 + commands: + - id: CMD-0003 + severity: HIGH + - id: 1.1.4 + name: Ensure that the controller manager pod specification file ownership is set + to root:root + description: Ensure that the controller manager pod specification file ownership + is set to root:root + checks: + - id: AVD-KCV-0051 + commands: + - id: CMD-0004 + severity: HIGH + - id: 1.1.5 + name: Ensure that the scheduler pod specification file permissions are set to + 600 or more restrictive + description: Ensure that the scheduler pod specification file has permissions of + 600 or more restrictive + checks: + - id: AVD-KCV-0052 + commands: + - id: CMD-0005 + severity: HIGH + - id: 1.1.6 + name: Ensure that the scheduler pod specification file ownership is set to + root:root + description: Ensure that the scheduler pod specification file ownership is set + to root:root + checks: + - id: AVD-KCV-0053 + commands: + - id: CMD-0006 + severity: HIGH + - id: 1.1.7 + name: Ensure that the etcd pod specification file permissions are set to 600 or + more restrictive + description: Ensure that the etcd pod specification file has permissions of 600 + or more restrictive + checks: + - id: AVD-KCV-0054 + commands: + - id: CMD-0007 + severity: HIGH + - id: 1.1.8 + name: Ensure that the etcd pod specification file ownership is set to root:root + description: Ensure that the etcd pod specification file 
ownership is set to + root:root. + checks: + - id: AVD-KCV-0055 + commands: + - id: CMD-0008 + severity: HIGH + - id: 1.1.9 + name: Ensure that the Container Network Interface file permissions are set to + 600 or more restrictive + description: Ensure that the Container Network Interface files have permissions + of 600 or more restrictive + checks: + - id: AVD-KCV-0056 + commands: + - id: CMD-0009 + severity: HIGH + - id: 1.1.10 + name: Ensure that the Container Network Interface file ownership is set to + root:root + description: Ensure that the Container Network Interface files have ownership + set to root:root + checks: + - id: AVD-KCV-0057 + commands: + - id: CMD-0010 + severity: HIGH + - id: 1.1.11 + name: Ensure that the etcd data directory permissions are set to 700 or more + restrictive + description: Ensure that the etcd data directory has permissions of 700 or more + restrictive + checks: + - id: AVD-KCV-0058 + commands: + - id: CMD-0011 + severity: HIGH + - id: 1.1.12 + name: Ensure that the etcd data directory ownership is set to etcd:etcd + description: Ensure that the etcd data directory ownership is set to etcd:etcd + checks: + - id: AVD-KCV-0059 + commands: + - id: CMD-0012 + severity: LOW + - id: 1.1.13 + name: Ensure that the admin.conf file permissions are set to 600 + description: Ensure that the admin.conf file has permissions of 600 + checks: + - id: AVD-KCV-0060 + commands: + - id: CMD-0013 + severity: CRITICAL + - id: 1.1.14 + name: Ensure that the admin.conf file ownership is set to root:root + description: Ensure that the admin.conf file ownership is set to root:root + checks: + - id: AVD-KCV-0061 + commands: + - id: CMD-0014 + severity: CRITICAL + - id: 1.1.15 + name: Ensure that the scheduler.conf file permissions are set to 600 or more + restrictive + description: Ensure that the scheduler.conf file has permissions of 600 or more + restrictive + checks: + - id: AVD-KCV-0062 + commands: + - id: CMD-0015 + severity: HIGH + - id: 1.1.16 + 
name: Ensure that the scheduler.conf file ownership is set to root:root + description: Ensure that the scheduler.conf file ownership is set to root:root + checks: + - id: AVD-KCV-0063 + commands: + - id: CMD-0016 + severity: HIGH + - id: 1.1.17 + name: Ensure that the controller-manager.conf file permissions are set to 600 or + more restrictive + description: Ensure that the controller-manager.conf file has permissions of 600 + or more restrictive + checks: + - id: AVD-KCV-0064 + commands: + - id: CMD-0017 + severity: HIGH + - id: 1.1.18 + name: Ensure that the controller-manager.conf file ownership is set to root:root + description: Ensure that the controller-manager.conf file ownership is set to + root:root. + checks: + - id: AVD-KCV-0065 + commands: + - id: CMD-0018 + severity: HIGH + - id: 1.1.19 + name: Ensure that the Kubernetes PKI directory and file ownership is set to + root:root + description: Ensure that the Kubernetes PKI directory and file ownership is set + to root:root + checks: + - id: AVD-KCV-0066 + commands: + - id: CMD-0019 + severity: CRITICAL + - id: 1.1.20 + name: Ensure that the Kubernetes PKI certificate file permissions are set to 600 + or more restrictive + description: Ensure that Kubernetes PKI certificate files have permissions of + 600 or more restrictive + checks: + - id: AVD-KCV-0068 + commands: + - id: CMD-0020 + severity: CRITICAL + - id: 1.1.21 + name: Ensure that the Kubernetes PKI key file permissions are set to 600 + description: Ensure that Kubernetes PKI key files have permissions of 600 + checks: + - id: AVD-KCV-0067 + commands: + - id: CMD-0021 + severity: CRITICAL + - id: 1.2.1 + name: Ensure that the --anonymous-auth argument is set to false + description: Disable anonymous requests to the API server + checks: + - id: AVD-KCV-0001 + severity: MEDIUM + - id: 1.2.2 + name: Ensure that the --token-auth-file parameter is not set + description: Do not use token based authentication + checks: + - id: AVD-KCV-0002 + severity: 
LOW + - id: 1.2.3 + name: Ensure that the --DenyServiceExternalIPs is not set + description: This admission controller rejects all net-new usage of the Service + field externalIPs + checks: + - id: AVD-KCV-0003 + severity: LOW + - id: 1.2.4 + name: Ensure that the --kubelet-https argument is set to true + description: Use https for kubelet connections + checks: + - id: AVD-KCV-0004 + severity: LOW + - id: 1.2.5 + name: Ensure that the --kubelet-client-certificate and --kubelet-client-key + arguments are set as appropriate + description: Enable certificate based kubelet authentication + checks: + - id: AVD-KCV-0005 + severity: HIGH + - id: 1.2.6 + name: Ensure that the --kubelet-certificate-authority argument is set as + appropriate + description: Verify kubelets certificate before establishing connection + checks: + - id: AVD-KCV-0006 + severity: HIGH + - id: 1.2.7 + name: Ensure that the --authorization-mode argument is not set to AlwaysAllow + description: Do not always authorize all requests + checks: + - id: AVD-KCV-0007 + severity: LOW + - id: 1.2.8 + name: Ensure that the --authorization-mode argument includes Node + description: Restrict kubelet nodes to reading only objects associated with them + checks: + - id: AVD-KCV-0008 + severity: HIGH + - id: 1.2.9 + name: Ensure that the --authorization-mode argument includes RBAC + description: Turn on Role Based Access Control + checks: + - id: AVD-KCV-0009 + severity: HIGH + - id: 1.2.10 + name: Ensure that the admission control plugin EventRateLimit is set + description: Limit the rate at which the API server accepts requests + checks: + - id: AVD-KCV-0010 + severity: HIGH + - id: 1.2.11 + name: Ensure that the admission control plugin AlwaysAdmit is not set + description: Do not allow all requests + checks: + - id: AVD-KCV-0011 + severity: LOW + - id: 1.2.12 + name: Ensure that the admission control plugin AlwaysPullImages is set + description: Always pull images + checks: + - id: AVD-KCV-0012 + severity: 
MEDIUM + - id: 1.2.13 + name: Ensure that the admission control plugin SecurityContextDeny is set if + PodSecurityPolicy is not used + description: The SecurityContextDeny admission controller can be used to deny + pods which make use of some SecurityContext fields which could allow for + privilege escalation in the cluster. This should be used where + PodSecurityPolicy is not in place within the cluster + checks: + - id: AVD-KCV-0013 + severity: MEDIUM + - id: 1.2.14 + name: Ensure that the admission control plugin ServiceAccount is set + description: Automate service accounts management + checks: + - id: AVD-KCV-0014 + severity: LOW + - id: 1.2.15 + name: Ensure that the admission control plugin NamespaceLifecycle is set + description: Reject creating objects in a namespace that is undergoing termination + checks: + - id: AVD-KCV-0015 + severity: LOW + - id: 1.2.16 + name: Ensure that the admission control plugin NodeRestriction is set + description: Limit the Node and Pod objects that a kubelet could modify + checks: + - id: AVD-KCV-0016 + severity: LOW + - id: 1.2.17 + name: Ensure that the --secure-port argument is not set to 0 + description: Do not disable the secure port + checks: + - id: AVD-KCV-0017 + severity: HIGH + - id: 1.2.18 + name: Ensure that the --profiling argument is set to false + description: Disable profiling, if not needed + checks: + - id: AVD-KCV-0018 + severity: LOW + - id: 1.2.19 + name: Ensure that the --audit-log-path argument is set + description: Enable auditing on the Kubernetes API Server and set the desired + audit log path. 
+ checks: + - id: AVD-KCV-0019 + severity: LOW + - id: 1.2.20 + name: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate + description: Retain the logs for at least 30 days or as appropriate + checks: + - id: AVD-KCV-0020 + severity: LOW + - id: 1.2.21 + name: Ensure that the --audit-log-maxbackup argument is set to 10 or as + appropriate + description: Retain 10 or an appropriate number of old log file + checks: + - id: AVD-KCV-0021 + severity: LOW + - id: 1.2.22 + name: Ensure that the --audit-log-maxsize argument is set to 100 or as + appropriate + description: Rotate log files on reaching 100 MB or as appropriate + checks: + - id: AVD-KCV-0022 + severity: LOW + - id: 1.2.24 + name: Ensure that the --service-account-lookup argument is set to true + description: Validate service account before validating token + checks: + - id: AVD-KCV-0024 + severity: LOW + - id: 1.2.25 + name: Ensure that the --service-account-key-file argument is set as appropriate + description: Explicitly set a service account public key file for service + accounts on the apiserver + checks: + - id: AVD-KCV-0025 + severity: LOW + - id: 1.2.26 + name: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as + appropriate + description: etcd should be configured to make use of TLS encryption for client + connections + checks: + - id: AVD-KCV-0026 + severity: LOW + - id: 1.2.27 + name: Ensure that the --tls-cert-file and --tls-private-key-file arguments are + set as appropriate + description: Setup TLS connection on the API server + checks: + - id: AVD-KCV-0027 + severity: MEDIUM + - id: 1.2.28 + name: Ensure that the --client-ca-file argument is set appropriate + description: Setup TLS connection on the API server + checks: + - id: AVD-KCV-0028 + severity: LOW + - id: 1.2.29 + name: Ensure that the --etcd-cafile argument is set as appropriate + description: etcd should be configured to make use of TLS encryption for client + connections. 
+ checks: + - id: AVD-KCV-0029 + severity: LOW + - id: 1.2.30 + name: Ensure that the --encryption-provider-config argument is set as + appropriate + description: Encrypt etcd key-value store + checks: + - id: AVD-KCV-0030 + severity: LOW + - id: 1.3.1 + name: Ensure that the --terminated-pod-gc-threshold argument is set as + appropriate + description: Activate garbage collector on pod termination, as appropriate + checks: + - id: AVD-KCV-0033 + severity: MEDIUM + - id: 1.3.3 + name: Ensure that the --use-service-account-credentials argument is set to true + description: Use individual service account credentials for each controller + checks: + - id: AVD-KCV-0035 + severity: MEDIUM + - id: 1.3.4 + name: Ensure that the --service-account-private-key-file argument is set as + appropriate + description: Explicitly set a service account private key file for service + accounts on the controller manager + checks: + - id: AVD-KCV-0036 + severity: MEDIUM + - id: 1.3.5 + name: Ensure that the --root-ca-file argument is set as appropriate + description: Allow pods to verify the API servers serving certificate before + establishing connections + checks: + - id: AVD-KCV-0037 + severity: MEDIUM + - id: 1.3.6 + name: Ensure that the RotateKubeletServerCertificate argument is set to true + description: Enable kubelet server certificate rotation on controller-manager + checks: + - id: AVD-KCV-0038 + severity: MEDIUM + - id: 1.3.7 + name: Ensure that the --bind-address argument is set to 127.0.0.1 + description: Do not bind the scheduler service to non-loopback insecure addresses + checks: + - id: AVD-KCV-0039 + severity: LOW + - id: 1.4.1 + name: Ensure that the --profiling argument is set to false + description: Disable profiling, if not needed + checks: + - id: AVD-KCV-0034 + severity: MEDIUM + - id: 1.4.2 + name: Ensure that the --bind-address argument is set to 127.0.0.1 + description: Do not bind the scheduler service to non-loopback insecure addresses + checks: + - id: 
AVD-KCV-0041 + severity: CRITICAL + - id: "2.1" + name: Ensure that the --cert-file and --key-file arguments are set as + appropriate + description: Configure TLS encryption for the etcd service + checks: + - id: AVD-KCV-0042 + severity: MEDIUM + - id: "2.2" + name: Ensure that the --client-cert-auth argument is set to true + description: Enable client authentication on etcd service + checks: + - id: AVD-KCV-0043 + severity: CRITICAL + - id: "2.3" + name: Ensure that the --auto-tls argument is not set to true + description: Do not use self-signed certificates for TLS + checks: + - id: AVD-KCV-0044 + severity: CRITICAL + - id: "2.4" + name: Ensure that the --peer-cert-file and --peer-key-file arguments are set as + appropriate + description: etcd should be configured to make use of TLS encryption for peer + connections. + checks: + - id: AVD-KCV-0045 + severity: CRITICAL + - id: "2.5" + name: Ensure that the --peer-client-cert-auth argument is set to true + description: etcd should be configured for peer authentication + checks: + - id: AVD-KCV-0046 + severity: CRITICAL + - id: "2.6" + name: Ensure that the --peer-auto-tls argument is not set to true + description: Do not use self-signed certificates for TLS + checks: + - id: AVD-KCV-0047 + severity: HIGH + - id: 3.1.1 + name: Client certificate authentication should not be used for users (Manual) + description: Kubernetes provides the option to use client certificates for user + authentication. However as there is no way to revoke these certificates + when a user leaves an organization or loses their credential, they are + not suitable for this purpose + severity: HIGH + - id: 3.2.1 + name: Ensure that a minimal audit policy is created (Manual) + description: Kubernetes can audit the details of requests made to the API + server. The --audit- policy-file flag must be set for this logging to be + enabled. 
+ severity: HIGH + - id: 3.2.2 + name: Ensure that the audit policy covers key security concerns (Manual) + description: Ensure that the audit policy created for the cluster covers key + security concerns + severity: HIGH + - id: 4.1.1 + name: Ensure that the kubelet service file permissions are set to 600 or more + restrictive + description: Ensure that the kubelet service file has permissions of 600 or more + restrictive. + checks: + - id: AVD-KCV-0069 + commands: + - id: CMD-0022 + severity: HIGH + - id: 4.1.2 + name: Ensure that the kubelet service file ownership is set to root:root + description: Ensure that the kubelet service file ownership is set to root:root + checks: + - id: AVD-KCV-0070 + commands: + - id: CMD-0023 + severity: HIGH + - id: 4.1.3 + name: If proxy kubeconfig file exists ensure permissions are set to 600 or more + restrictive + description: If kube-proxy is running, and if it is using a file-based + kubeconfig file, ensure that the proxy kubeconfig file has permissions + of 600 or more restrictive + checks: + - id: AVD-KCV-0071 + commands: + - id: CMD-0024 + severity: HIGH + - id: 4.1.4 + name: If proxy kubeconfig file exists ensure ownership is set to root:root + description: If kube-proxy is running, ensure that the file ownership of its + kubeconfig file is set to root:root + checks: + - id: AVD-KCV-0072 + commands: + - id: CMD-0025 + severity: HIGH + - id: 4.1.5 + name: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 + or more restrictive + description: Ensure that the kubelet.conf file has permissions of 600 or more + restrictive + checks: + - id: AVD-KCV-0073 + commands: + - id: CMD-0026 + severity: HIGH + - id: 4.1.6 + name: Ensure that the --kubeconfig kubelet.conf file ownership is set to + root:root + description: Ensure that the kubelet.conf file ownership is set to root:root + checks: + - id: AVD-KCV-0074 + commands: + - id: CMD-0027 + severity: HIGH + - id: 4.1.7 + name: Ensure that the certificate 
authorities file permissions are set to 600 or + more restrictive + description: Ensure that the certificate authorities file has permissions of 600 + or more restrictive + checks: + - id: AVD-KCV-0075 + commands: + - id: CMD-0028 + severity: CRITICAL + - id: 4.1.8 + name: Ensure that the client certificate authorities file ownership is set to + root:root + description: Ensure that the certificate authorities file ownership is set to + root:root + checks: + - id: AVD-KCV-0076 + commands: + - id: CMD-0029 + severity: CRITICAL + - id: 4.1.9 + name: If the kubelet config.yaml configuration file is being used validate + permissions set to 600 or more restrictive + description: Ensure that if the kubelet refers to a configuration file with the + --config argument, that file has permissions of 600 or more restrictive + checks: + - id: AVD-KCV-0077 + commands: + - id: CMD-0030 + severity: HIGH + - id: 4.1.10 + name: If the kubelet config.yaml configuration file is being used validate file + ownership is set to root:root + description: Ensure that if the kubelet refers to a configuration file with the + --config argument, that file is owned by root:root + checks: + - id: AVD-KCV-0078 + commands: + - id: CMD-0031 + severity: HIGH + - id: 4.2.1 + name: Ensure that the --anonymous-auth argument is set to false + description: Disable anonymous requests to the Kubelet server + checks: + - id: AVD-KCV-0079 + commands: + - id: CMD-0032 + severity: CRITICAL + - id: 4.2.2 + name: Ensure that the --authorization-mode argument is not set to AlwaysAllow + description: Do not allow all requests. 
Enable explicit authorization + checks: + - id: AVD-KCV-0080 + commands: + - id: CMD-0033 + severity: CRITICAL + - id: 4.2.3 + name: Ensure that the --client-ca-file argument is set as appropriate + description: Enable Kubelet authentication using certificates + checks: + - id: AVD-KCV-0081 + commands: + - id: CMD-0034 + severity: CRITICAL + - id: 4.2.4 + name: Verify that the --read-only-port argument is set to 0 + description: Disable the read-only port + checks: + - id: AVD-KCV-0082 + commands: + - id: CMD-0035 + severity: HIGH + - id: 4.2.5 + name: Ensure that the --streaming-connection-idle-timeout argument is not set to + 0 + description: Do not disable timeouts on streaming connections + checks: + - id: AVD-KCV-0085 + commands: + - id: CMD-0036 + severity: HIGH + - id: 4.2.6 + name: Ensure that the --protect-kernel-defaults argument is set to true + description: Protect tuned kernel parameters from overriding kubelet default + kernel parameter values + checks: + - id: AVD-KCV-0083 + commands: + - id: CMD-0037 + severity: HIGH + - id: 4.2.7 + name: Ensure that the --make-iptables-util-chains argument is set to true + description: Allow Kubelet to manage iptables + checks: + - id: AVD-KCV-0084 + commands: + - id: CMD-0038 + severity: HIGH + - id: 4.2.8 + name: Ensure that the --hostname-override argument is not set + description: Do not override node hostnames + checks: + - id: AVD-KCV-0086 + commands: + - id: CMD-0039 + severity: HIGH + - id: 4.2.9 + name: Ensure that the --event-qps argument is set to 0 or a level which ensures + appropriate event capture + description: Security relevant information should be captured. 
The --event-qps + flag on the Kubelet can be used to limit the rate at which events are + gathered + checks: + - id: AVD-KCV-0087 + commands: + - id: CMD-0040 + severity: HIGH + - id: 4.2.10 + name: Ensure that the --tls-cert-file and --tls-private-key-file arguments are + set as appropriate + description: Setup TLS connection on the Kubelets + checks: + - id: AVD-KCV-0088 + - id: AVD-KCV-0089 + commands: + - id: CMD-0041 + - id: CMD-0042 + severity: CRITICAL + - id: 4.2.11 + name: Ensure that the --rotate-certificates argument is not set to false + description: Enable kubelet client certificate rotation + checks: + - id: AVD-KCV-0090 + commands: + - id: CMD-0043 + severity: CRITICAL + - id: 4.2.12 + name: Verify that the RotateKubeletServerCertificate argument is set to true + description: Enable kubelet server certificate rotation + checks: + - id: AVD-KCV-0091 + commands: + - id: CMD-0044 + severity: CRITICAL + - id: 4.2.13 + name: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers + description: Ensure that the Kubelet is configured to only use strong + cryptographic ciphers + checks: + - id: AVD-KCV-0092 + commands: + - id: CMD-0045 + severity: CRITICAL + - id: 5.1.1 + name: Ensure that the cluster-admin role is only used where required + description: The RBAC role cluster-admin provides wide-ranging powers over the + environment and should be used only where and when needed + checks: + - id: AVD-KSV-0111 + severity: HIGH + - id: 5.1.2 + name: Minimize access to secrets + description: The Kubernetes API stores secrets, which may be service account + tokens for the Kubernetes API or credentials used by workloads in the + cluster + checks: + - id: AVD-KSV-0041 + severity: HIGH + - id: 5.1.3 + name: Minimize wildcard use in Roles and ClusterRoles + description: Kubernetes Roles and ClusterRoles provide access to resources based + on sets of objects and actions that can be taken on those objects. 
It is + possible to set either of these to be the wildcard "*" which matches all + items + checks: + - id: AVD-KSV-0044 + - id: AVD-KSV-0045 + - id: AVD-KSV-0046 + severity: HIGH + - id: 5.1.6 + name: Ensure that Service Account Tokens are only mounted where necessary + description: Service accounts tokens should not be mounted in pods except where + the workload running in the pod explicitly needs to communicate with the + API server + checks: + - id: AVD-KSV-0036 + severity: HIGH + - id: 5.1.8 + name: Limit use of the Bind, Impersonate and Escalate permissions in the + Kubernetes cluster + description: Cluster roles and roles with the impersonate, bind or escalate + permissions should not be granted unless strictly required + checks: + - id: AVD-KSV-0043 + severity: HIGH + - id: 5.2.2 + name: Minimize the admission of privileged containers + description: Do not generally permit containers to be run with the + securityContext.privileged flag set to true + checks: + - id: AVD-KSV-0017 + severity: HIGH + - id: 5.2.3 + name: Minimize the admission of containers wishing to share the host process ID + namespace + description: Do not generally permit containers to be run with the hostPID flag + set to true. 
+ checks: + - id: AVD-KSV-0010 + severity: HIGH + - id: 5.2.4 + name: Minimize the admission of containers wishing to share the host IPC + namespace + description: Do not generally permit containers to be run with the hostIPC flag + set to true + checks: + - id: AVD-KSV-0008 + severity: HIGH + - id: 5.2.5 + name: Minimize the admission of containers wishing to share the host network + namespace + description: Do not generally permit containers to be run with the hostNetwork + flag set to true + checks: + - id: AVD-KSV-0009 + severity: HIGH + - id: 5.2.6 + name: Minimize the admission of containers with allowPrivilegeEscalation + description: Do not generally permit containers to be run with the + allowPrivilegeEscalation flag set to true + checks: + - id: AVD-KSV-0001 + severity: HIGH + - id: 5.2.7 + name: Minimize the admission of root containers + description: Do not generally permit containers to be run as the root user + checks: + - id: AVD-KSV-0012 + severity: MEDIUM + - id: 5.2.8 + name: Minimize the admission of containers with the NET_RAW capability + description: Do not generally permit containers with the potentially dangerous + NET_RAW capability + checks: + - id: AVD-KSV-0022 + severity: MEDIUM + - id: 5.2.9 + name: Minimize the admission of containers with added capabilities + description: Do not generally permit containers with capabilities assigned + beyond the default set + checks: + - id: AVD-KSV-0004 + severity: LOW + - id: 5.2.10 + name: Minimize the admission of containers with capabilities assigned + description: Do not generally permit containers with capabilities + checks: + - id: AVD-KSV-0003 + severity: LOW + - id: 5.2.11 + name: Minimize the admission of containers with capabilities assigned + description: Do not generally permit containers with capabilities + checks: + - id: AVD-KSV-0103 + severity: MEDIUM + - id: 5.2.12 + name: Minimize the admission of HostPath volumes + description: Do not generally admit containers which make use of 
hostPath volumes + checks: + - id: AVD-KSV-0023 + severity: MEDIUM + - id: 5.2.13 + name: Minimize the admission of containers which use HostPorts + description: Do not generally permit containers which require the use of HostPorts + checks: + - id: AVD-KSV-0024 + severity: MEDIUM + - id: 5.3.1 + name: Ensure that the CNI in use supports Network Policies (Manual) + description: There are a variety of CNI plugins available for Kubernetes. If the + CNI in use does not support Network Policies it may not be possible to + effectively restrict traffic in the cluster + severity: MEDIUM + - id: 5.3.2 + name: Ensure that all Namespaces have Network Policies defined + description: Use network policies to isolate traffic in your cluster network + checks: + - id: AVD-KSV-0038 + severity: MEDIUM + - id: 5.4.1 + name: Prefer using secrets as files over secrets as environment variables + (Manual) + description: Kubernetes supports mounting secrets as data volumes or as + environment variables. Minimize the use of environment variable secrets + severity: MEDIUM + - id: 5.4.2 + name: Consider external secret storage (Manual) + description: Consider the use of an external secrets storage and management + system, instead of using Kubernetes Secrets directly, if you have more + complex secret management needs + severity: MEDIUM + - id: 5.5.1 + name: Configure Image Provenance using ImagePolicyWebhook admission controller + (Manual) + description: Configure Image Provenance for your deployment + severity: MEDIUM + - id: 5.7.1 + name: Create administrative boundaries between resources using namespaces + (Manual) + description: Use namespaces to isolate your Kubernetes objects + severity: MEDIUM + - id: 5.7.2 + name: Ensure that the seccomp profile is set to docker/default in your pod + definitions + description: Enable docker/default seccomp profile in your pod definitions + checks: + - id: AVD-KSV-0104 + severity: MEDIUM + - id: 5.7.3 + name: Apply Security Context to Your Pods and 
Containers + description: Apply Security Context to Your Pods and Containers + checks: + - id: AVD-KSV-0021 + - id: AVD-KSV-0020 + - id: AVD-KSV-0005 + - id: AVD-KSV-0025 + - id: AVD-KSV-0104 + - id: AVD-KSV-0030 + severity: HIGH + - id: 5.7.4 + name: The default namespace should not be used + description: Kubernetes provides a default namespace, where objects are placed + if no namespace is specified for them + checks: + - id: AVD-KSV-0110 + severity: MEDIUM +--- +# Source: trivy/charts/trivy-operator/templates/specs/k8s-nsa-1.0.yaml +apiVersion: aquasecurity.github.io/v1alpha1 +kind: ClusterComplianceReport +metadata: + name: k8s-nsa-1.0 + labels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy-operator + app.kubernetes.io/version: 0.29.0 + app.kubernetes.io/managed-by: kubectl +spec: + cron: "0 5 * * *" + reportType: "summary" + compliance: + id: k8s-nsa-1.0 + platform: k8s + type: nsa + title: National Security Agency - Kubernetes Hardening Guidance v1.0 + description: National Security Agency - Kubernetes Hardening Guidance + relatedResources: + - https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/ + version: "1.0" + controls: + - name: Non-root containers + description: Check that container is not running as root + id: "1.0" + checks: + - id: AVD-KSV-0012 + severity: MEDIUM + - name: Immutable container file systems + description: Check that container root file system is immutable + id: "1.1" + checks: + - id: AVD-KSV-0014 + severity: LOW + - name: Preventing privileged containers + description: Controls whether Pods can run privileged containers + id: "1.2" + checks: + - id: AVD-KSV-0017 + severity: HIGH + - name: Share containers process namespaces + description: Controls whether containers can share process namespaces + id: "1.3" + checks: + - id: AVD-KSV-0008 + severity: HIGH + - name: Share host process namespaces + description: Controls whether share host 
process namespaces + id: "1.4" + checks: + - id: AVD-KSV-0010 + severity: HIGH + - name: Use the host network + description: Controls whether containers can use the host network + id: "1.5" + checks: + - id: AVD-KSV-0009 + severity: HIGH + - name: Run with root privileges or with root group membership + description: Controls whether container applications can run with root + privileges or with root group membership + id: "1.6" + checks: + - id: AVD-KSV-0029 + severity: LOW + - name: Restricts escalation to root privileges + description: Control check restrictions escalation to root privileges + id: "1.7" + checks: + - id: AVD-KSV-0001 + severity: MEDIUM + - name: Sets the SELinux context of the container + description: Control checks if pod sets the SELinux context of the container + id: "1.8" + checks: + - id: AVD-KSV-0025 + severity: MEDIUM + - name: Restrict a container's access to resources with AppArmor + description: Control checks the restriction of containers access to resources + with AppArmor + id: "1.9" + checks: + - id: AVD-KSV-0002 + severity: MEDIUM + - name: Sets the seccomp profile used to sandbox containers. 
+ description: Control checks the sets the seccomp profile used to sandbox containers + id: "1.10" + checks: + - id: AVD-KSV-0030 + severity: LOW + - name: Protecting Pod service account tokens + description: "Control check whether disable secret token been mount + ,automountServiceAccountToken: false" + id: "1.11" + checks: + - id: AVD-KSV-0036 + severity: MEDIUM + - name: Namespace kube-system should not be used by users + description: Control check whether Namespace kube-system is not be used by users + id: "1.12" + defaultStatus: FAIL + checks: + - id: AVD-KSV-0037 + severity: MEDIUM + - name: Pod and/or namespace Selectors usage + description: Control check validate the pod and/or namespace Selectors usage + id: "2.0" + defaultStatus: FAIL + checks: + - id: AVD-KSV-0038 + severity: MEDIUM + - name: Use CNI plugin that supports NetworkPolicy API (Manual) + description: Control check whether check cni plugin installed + id: "3.0" + defaultStatus: FAIL + severity: CRITICAL + - name: Use ResourceQuota policies to limit resources + description: Control check the use of ResourceQuota policy to limit aggregate + resource usage within namespace + id: "4.0" + defaultStatus: FAIL + checks: + - id: AVD-KSV-0040 + severity: MEDIUM + - name: Use LimitRange policies to limit resources + description: Control check the use of LimitRange policy limit resource usage for + namespaces or nodes + id: "4.1" + defaultStatus: FAIL + checks: + - id: AVD-KSV-0039 + severity: MEDIUM + - name: Control plan disable insecure port (Manual) + description: Control check whether control plan disable insecure port + id: "5.0" + defaultStatus: FAIL + severity: CRITICAL + - name: Encrypt etcd communication + description: Control check whether etcd communication is encrypted + id: "5.1" + checks: + - id: AVD-KCV-0030 + severity: CRITICAL + - name: Ensure kube config file permission (Manual) + description: Control check whether kube config file permissions + id: "6.0" + defaultStatus: FAIL + 
severity: CRITICAL + - name: Check that encryption resource has been set + description: Control checks whether encryption resource has been set + id: "6.1" + checks: + - id: AVD-KCV-0029 + severity: CRITICAL + - name: Check encryption provider + description: Control checks whether encryption provider has been set + id: "6.2" + checks: + - id: AVD-KCV-0004 + severity: CRITICAL + - name: Make sure anonymous-auth is unset + description: Control checks whether anonymous-auth is unset + id: "7.0" + checks: + - id: AVD-KCV-0001 + severity: CRITICAL + - name: Make sure -authorization-mode=RBAC + description: Control check whether RBAC permission is in use + id: "7.1" + checks: + - id: AVD-KCV-0008 + severity: CRITICAL + - name: Audit policy is configure (Manual) + description: Control check whether audit policy is configure + id: "8.0" + defaultStatus: FAIL + severity: HIGH + - name: Audit log path is configure + description: Control check whether audit log path is configure + id: "8.1" + checks: + - id: AVD-KCV-0019 + severity: MEDIUM + - name: Audit log aging + description: Control check whether audit log aging is configure + id: "8.2" + checks: + - id: AVD-KCV-0020 + severity: MEDIUM +--- +# Source: trivy/charts/trivy-operator/templates/specs/k8s-pss-baseline-0.1.yaml +apiVersion: aquasecurity.github.io/v1alpha1 +kind: ClusterComplianceReport +metadata: + name: k8s-pss-baseline-0.1 + labels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy-operator + app.kubernetes.io/version: 0.29.0 + app.kubernetes.io/managed-by: kubectl +spec: + cron: "0 5 * * *" + reportType: "summary" + compliance: + id: k8s-pss-baseline-0.1 + platform: k8s + type: pss-baseline + title: Kubernetes Pod Security Standards - Baseline + description: Kubernetes Pod Security Standards - Baseline + relatedResources: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + version: "0.1" + controls: + - name: HostProcess + description: Windows pods
offer the ability to run HostProcess containers which + enables privileged access to the Windows node. Privileged access to + the host is disallowed in the baseline policy + id: "1" + checks: + - id: AVD-KSV-0103 + severity: HIGH + - name: Host Namespaces + description: Sharing the host namespaces must be disallowed. + id: "2" + checks: + - id: AVD-KSV-0008 + severity: HIGH + - name: Privileged Containers + description: Privileged Pods disable most security mechanisms and must be + disallowed. + id: "3" + checks: + - id: AVD-KSV-0017 + severity: HIGH + - name: Capabilities + description: Adding additional capabilities beyond those listed below must be + disallowed. + id: "4" + checks: + - id: AVD-KSV-0022 + severity: MEDIUM + - name: HostPath Volumes + description: HostPath volumes must be forbidden. + id: "5" + checks: + - id: AVD-KSV-0023 + severity: MEDIUM + - name: host ports + description: hostports should be disallowed, or at minimum restricted to a known + list. + id: "6" + checks: + - id: avd-ksv-0024 + severity: HIGH + - name: AppArmor + description: On supported hosts, the runtime/default AppArmor profile is applied + by default. The baseline policy should prevent overriding or disabling + the default AppArmor profile, or restrict overrides to an allowed set + of profiles. + id: "7" + checks: + - id: avd-ksv-0002 + severity: HIGH + - name: SELinux + description: Setting the SELinux type is restricted, and setting a custom + SELinux user or role option is forbidden. + id: "8" + checks: + - id: avd-ksv-0025 + severity: MEDIUM + - name: /proc Mount Type + description: The default /proc masks are set up to reduce attack surface, and + should be required. + id: "9" + checks: + - id: avd-ksv-0027 + severity: MEDIUM + - name: Seccomp + description: Seccomp profile must not be explicitly set to Unconfined. 
+ id: "10" + checks: + - id: avd-ksv-0104 + severity: MEDIUM + - name: Sysctls + description: Sysctls can disable security mechanisms or affect all containers on + a host, and should be disallowed except for an allowed 'safe' subset. + A sysctl is considered safe if it is namespaced in the container or + the Pod, and it is isolated from other Pods or processes on the same + Node. + id: "11" + checks: + - id: avd-ksv-0026 + severity: MEDIUM +--- +# Source: trivy/charts/trivy-operator/templates/specs/k8s-pss-restricted-0.1.yaml +apiVersion: aquasecurity.github.io/v1alpha1 +kind: ClusterComplianceReport +metadata: + name: k8s-pss-restricted-0.1 + labels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy-operator + app.kubernetes.io/version: 0.29.0 + app.kubernetes.io/managed-by: kubectl +spec: + cron: "0 5 * * *" + reportType: "summary" + compliance: + id: k8s-pss-restricted-0.1 + platform: k8s + type: pss-restricted + title: Kubernetes Pod Security Standards - Restricted + description: Kubernetes Pod Security Standards - Restricted + relatedResources: + - https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + version: "0.1" + controls: + - name: HostProcess + description: Windows pods offer the ability to run HostProcess containers which + enables privileged access to the Windows node. Privileged access to + the host is disallowed in the baseline policy + id: "1" + checks: + - id: AVD-KSV-0103 + severity: HIGH + - name: Host Namespaces + description: Sharing the host namespaces must be disallowed. + id: "2" + checks: + - id: AVD-KSV-0008 + severity: HIGH + - name: Privileged Containers + description: Privileged Pods disable most security mechanisms and must be + disallowed. + id: "3" + checks: + - id: AVD-KSV-0017 + severity: HIGH + - name: Capabilities + description: Adding additional capabilities beyond those listed below must be + disallowed. 
+ id: "4" + checks: + - id: AVD-KSV-0022 + severity: MEDIUM + - name: HostPath Volumes + description: HostPath volumes must be forbidden. + id: "5" + checks: + - id: AVD-KSV-0023 + severity: MEDIUM + - name: host ports + description: hostports should be disallowed, or at minimum restricted to a known + list. + id: "6" + checks: + - id: avd-ksv-0024 + severity: HIGH + - name: AppArmor + description: On supported hosts, the runtime/default AppArmor profile is applied + by default. The baseline policy should prevent overriding or disabling + the default AppArmor profile, or restrict overrides to an allowed set + of profiles. + id: "7" + checks: + - id: avd-ksv-0002 + severity: HIGH + - name: SELinux + description: Setting the SELinux type is restricted, and setting a custom + SELinux user or role option is forbidden. + id: "8" + checks: + - id: avd-ksv-0025 + severity: MEDIUM + - name: /proc Mount Type + description: The default /proc masks are set up to reduce attack surface, and + should be required. + id: "9" + checks: + - id: avd-ksv-0027 + severity: MEDIUM + - name: Seccomp + description: Seccomp profile must not be explicitly set to Unconfined. + id: "10" + checks: + - id: avd-ksv-0104 + severity: MEDIUM + - name: Sysctls + description: Sysctls can disable security mechanisms or affect all containers on + a host, and should be disallowed except for an allowed 'safe' subset. + A sysctl is considered safe if it is namespaced in the container or + the Pod, and it is isolated from other Pods or processes on the same + Node. + id: "11" + checks: + - id: avd-ksv-0026 + severity: MEDIUM + - name: Volume Types + description: The restricted policy only permits specific volume types. + id: "12" + checks: + - id: avd-ksv-0028 + severity: LOW + - name: Privilege Escalation + description: Privilege escalation (such as via set-user-ID or set-group-ID file + mode) should not be allowed. 
+ id: "13" + checks: + - id: avd-ksv-0001 + severity: MEDIUM + - name: Running as Non-root + description: Containers must be required to run as non-root users. + id: "14" + checks: + - id: avd-ksv-0012 + severity: MEDIUM + - name: Running as Non-root user + description: Containers must not set runAsUser to 0 + id: "15" + checks: + - id: avd-ksv-0105 + severity: LOW + - name: Seccomp + description: Seccomp profile must be explicitly set to one of the allowed + values. Both the Unconfined profile and the absence of a profile are + prohibited + id: "16" + checks: + - id: avd-ksv-0030 + severity: LOW + - name: Capabilities + description: Containers must drop ALL capabilities, and are only permitted to + add back the NET_BIND_SERVICE capability. + id: "17" + checks: + - id: avd-ksv-0106 + severity: LOW +--- +# Source: trivy/charts/trivy-operator/templates/monitor/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: trivy-trivy-operator + namespace: trivy + labels: + helm.sh/chart: trivy-operator-0.31.0 + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + app.kubernetes.io/version: "0.29.0" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy + endpoints: + - honorLabels: true + port: metrics + scheme: http diff --git a/clusters/cl01tl/manifests/unpoller/unpoller.yaml b/clusters/cl01tl/manifests/unpoller/unpoller.yaml new file mode 100644 index 000000000..c0b8115ed --- /dev/null +++ b/clusters/cl01tl/manifests/unpoller/unpoller.yaml @@ -0,0 +1,151 @@ +--- +# Source: unpoller/charts/unpoller/templates/common.yaml +apiVersion: v1 +kind: Service +metadata: + name: unpoller + labels: + app.kubernetes.io/instance: unpoller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: unpoller + app.kubernetes.io/service: unpoller + helm.sh/chart: unpoller-4.4.0 + namespace: unpoller +spec: + type: ClusterIP + 
ports: + - port: 9130 + targetPort: 9130 + protocol: TCP + name: metrics + selector: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: unpoller + app.kubernetes.io/name: unpoller +--- +# Source: unpoller/charts/unpoller/templates/common.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: unpoller + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: unpoller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: unpoller + helm.sh/chart: unpoller-4.4.0 + namespace: unpoller +spec: + revisionHistoryLimit: 3 + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app.kubernetes.io/controller: main + app.kubernetes.io/name: unpoller + app.kubernetes.io/instance: unpoller + template: + metadata: + labels: + app.kubernetes.io/controller: main + app.kubernetes.io/instance: unpoller + app.kubernetes.io/name: unpoller + spec: + enableServiceLinks: false + serviceAccountName: default + automountServiceAccountToken: true + hostIPC: false + hostNetwork: false + hostPID: false + dnsPolicy: ClusterFirst + containers: + - env: + - name: UP_UNIFI_CONTROLLER_0_SAVE_ALARMS + value: "false" + - name: UP_UNIFI_CONTROLLER_0_SAVE_ANOMALIES + value: "false" + - name: UP_UNIFI_CONTROLLER_0_SAVE_DPI + value: "false" + - name: UP_UNIFI_CONTROLLER_0_SAVE_EVENTS + value: "false" + - name: UP_UNIFI_CONTROLLER_0_SAVE_IDS + value: "false" + - name: UP_UNIFI_CONTROLLER_0_SAVE_SITES + value: "true" + - name: UP_UNIFI_CONTROLLER_0_URL + value: https://unifi.alexlebens.net/ + - name: UP_UNIFI_CONTROLLER_0_VERIFY_SSL + value: "false" + - name: UP_INFLUXDB_DISABLE + value: "true" + - name: UP_PROMETHEUS_HTTP_LISTEN + value: 0.0.0.0:9130 + - name: UP_PROMETHEUS_NAMESPACE + value: unpoller + - name: UP_POLLER_DEBUG + value: "false" + - name: UP_POLLER_QUIET + value: "false" + envFrom: + - secretRef: + name: unpoller-unifi-secret + image: ghcr.io/unpoller/unpoller:v2.15.4 + imagePullPolicy: IfNotPresent + name: main + 
resources: + requests: + cpu: 10m + memory: 64Mi +--- +# Source: unpoller/templates/external-secret.yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: unpoller-unifi-secret + namespace: unpoller + labels: + app.kubernetes.io/name: unpoller-unifi-secret + app.kubernetes.io/instance: unpoller + app.kubernetes.io/part-of: unpoller +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + data: + - secretKey: UP_UNIFI_CONTROLLER_0_USER + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /unifi/auth/cl01tl + metadataPolicy: None + property: user + - secretKey: UP_UNIFI_CONTROLLER_0_PASS + remoteRef: + conversionStrategy: Default + decodingStrategy: None + key: /unifi/auth/cl01tl + metadataPolicy: None + property: password +--- +# Source: unpoller/templates/service-monitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: unpoller + namespace: unpoller + labels: + app.kubernetes.io/name: unpoller + app.kubernetes.io/instance: unpoller + app.kubernetes.io/part-of: unpoller +spec: + selector: + matchLabels: + app.kubernetes.io/name: unpoller + app.kubernetes.io/instance: unpoller + endpoints: + - port: metrics + interval: 30s + scrapeTimeout: 10s + path: /metrics